Index: head/sys/arm/nvidia/drm2/tegra_dc.c =================================================================== --- head/sys/arm/nvidia/drm2/tegra_dc.c (revision 359440) +++ head/sys/arm/nvidia/drm2/tegra_dc.c (revision 359441) @@ -1,1447 +1,1447 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "tegra_drm_if.h" #include "tegra_dc_if.h" #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, 4 * (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, 4 * (_r)) #define LOCK(_sc) mtx_lock(&(_sc)->mtx) #define UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define SLEEP(_sc, timeout) \ mtx_sleep(sc, &sc->mtx, 0, "tegra_dc_wait", timeout); #define LOCK_INIT(_sc) \ mtx_init(&_sc->mtx, device_get_nameunit(_sc->dev), "tegra_dc", MTX_DEF) #define LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) #define SYNCPT_VBLANK0 26 #define SYNCPT_VBLANK1 27 #define DC_MAX_PLANES 2 /* Maximum planes */ /* DRM Formats supported by DC */ /* XXXX expand me */ static uint32_t dc_plane_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, DRM_FORMAT_YUV420, DRM_FORMAT_YUV422, }; /* Complete description of one window (plane) */ struct dc_window { /* Source (in framebuffer) rectangle, in pixels */ u_int src_x; u_int src_y; u_int src_w; u_int src_h; /* Destination (on display) rectangle, in pixels */ u_int dst_x; u_int dst_y; u_int dst_w; u_int dst_h; /* Parsed pixel format */ u_int bits_per_pixel; bool is_yuv; /* any YUV mode */ bool is_yuv_planar; /* planar YUV mode */ uint32_t color_mode; /* DC_WIN_COLOR_DEPTH */ uint32_t swap; /* DC_WIN_BYTE_SWAP */ uint32_t surface_kind; /* DC_WINBUF_SURFACE_KIND */ uint32_t block_height; /* DC_WINBUF_SURFACE_KIND */ /* Parsed flipping, rotation is not supported for pitched modes */ bool flip_x; /* inverted X-axis */ bool flip_y; /* inverted Y-axis */ bool transpose_xy; /* swap X and Y-axis */ /* Color planes base addresses and strides */ bus_size_t base[3]; 
uint32_t stride[3]; /* stride[2] isn't used by HW */ }; struct dc_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; struct mtx mtx; clk_t clk_parent; clk_t clk_dc; hwreset_t hwreset_dc; int pitch_align; struct tegra_crtc tegra_crtc; struct drm_pending_vblank_event *event; struct drm_gem_object *cursor_gem; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-dc", 1}, {NULL, 0}, }; /* Convert standard drm pixel format to tegra windows parameters. */ static int dc_parse_drm_format(struct tegra_fb *fb, struct dc_window *win) { struct tegra_bo *bo; uint32_t cm; uint32_t sw; bool is_yuv, is_yuv_planar; int nplanes, i; switch (fb->drm_fb.pixel_format) { case DRM_FORMAT_XBGR8888: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_R8G8B8A8; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_XRGB8888: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_B8G8R8A8; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_RGB565: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_B5G6R5; is_yuv = false; is_yuv_planar = false; break; case DRM_FORMAT_UYVY: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr422; is_yuv = true; is_yuv_planar = false; break; case DRM_FORMAT_YUYV: sw = BYTE_SWAP(SWAP2); cm = WIN_COLOR_DEPTH_YCbCr422; is_yuv = true; is_yuv_planar = false; break; case DRM_FORMAT_YUV420: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr420P; is_yuv = true; is_yuv_planar = true; break; case DRM_FORMAT_YUV422: sw = BYTE_SWAP(NOSWAP); cm = WIN_COLOR_DEPTH_YCbCr422P; is_yuv = true; is_yuv_planar = true; break; default: /* Unsupported format */ return (-EINVAL); } /* Basic check of arguments. */ switch (fb->rotation) { case 0: case 180: break; case 90: /* Rotation is supported only */ case 270: /* for block linear surfaces */ if (!fb->block_linear) return (-EINVAL); break; default: return (-EINVAL); } /* XXX Add more checks (sizes, scaling...) */ if (win == NULL) return (0); win->surface_kind = fb->block_linear ? 
SURFACE_KIND_BL_16B2: SURFACE_KIND_PITCH; win->block_height = fb->block_height; switch (fb->rotation) { case 0: /* (0,0,0) */ win->transpose_xy = false; win->flip_x = false; win->flip_y = false; break; case 90: /* (1,0,1) */ win->transpose_xy = true; win->flip_x = false; win->flip_y = true; break; case 180: /* (0,1,1) */ win->transpose_xy = false; win->flip_x = true; win->flip_y = true; break; case 270: /* (1,1,0) */ win->transpose_xy = true; win->flip_x = true; win->flip_y = false; break; } win->flip_x ^= fb->flip_x; win->flip_y ^= fb->flip_y; win->color_mode = cm; win->swap = sw; win->bits_per_pixel = fb->drm_fb.bits_per_pixel; win->is_yuv = is_yuv; win->is_yuv_planar = is_yuv_planar; nplanes = drm_format_num_planes(fb->drm_fb.pixel_format); for (i = 0; i < nplanes; i++) { bo = fb->planes[i]; win->base[i] = bo->pbase + fb->drm_fb.offsets[i]; win->stride[i] = fb->drm_fb.pitches[i]; } return (0); } /* * Scaling functions. * * It's unclear if we want/must program the fractional portion * (aka bias) of init_dda registers, mainly when mirrored axis * modes are used. * For now, we use 1.0 as recommended by TRM. */ static inline uint32_t dc_scaling_init(uint32_t start) { return (1 << 12); } static inline uint32_t dc_scaling_incr(uint32_t src, uint32_t dst, uint32_t maxscale) { uint32_t val; val = (src - 1) << 12 ; /* 4.12 fixed float */ val /= (dst - 1); if (val > (maxscale << 12)) val = maxscale << 12; return val; } /* ------------------------------------------------------------------- * * HW Access. * */ /* * Setup pixel clock. * Minimal frequency is pixel clock, but output is free to select * any higher. 
*/ static int dc_setup_clk(struct dc_softc *sc, struct drm_crtc *crtc, struct drm_display_mode *mode, uint32_t *div) { uint64_t pclk, freq; struct tegra_drm_encoder *output; struct drm_encoder *encoder; long rv; pclk = mode->clock * 1000; /* Find attached encoder */ output = NULL; list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { output = container_of(encoder, struct tegra_drm_encoder, encoder); break; } } if (output == NULL) return (-ENODEV); if (output->setup_clock == NULL) panic("Output have not setup_clock function.\n"); rv = output->setup_clock(output, sc->clk_dc, pclk); if (rv != 0) { device_printf(sc->dev, "Cannot setup pixel clock: %llu\n", pclk); return (rv); } rv = clk_get_freq(sc->clk_dc, &freq); *div = (freq * 2 / pclk) - 2; DRM_DEBUG_KMS("frequency: %llu, DC divider: %u\n", freq, *div); return 0; } static void dc_setup_window(struct dc_softc *sc, unsigned int index, struct dc_window *win) { uint32_t h_offset, v_offset, h_size, v_size, bpp; uint32_t h_init_dda, v_init_dda, h_incr_dda, v_incr_dda; uint32_t val; #ifdef DMR_DEBUG_WINDOW printf("%s window: %d\n", __func__, index); printf(" src: x: %d, y: %d, w: %d, h: %d\n", win->src_x, win->src_y, win->src_w, win->src_h); printf(" dst: x: %d, y: %d, w: %d, h: %d\n", win->dst_x, win->dst_y, win->dst_w, win->dst_h); printf(" bpp: %d, color_mode: %d, swap: %d\n", win->bits_per_pixel, win->color_mode, win->swap); #endif if (win->is_yuv) bpp = win->is_yuv_planar ? 
1 : 2; else bpp = (win->bits_per_pixel + 7) / 8; if (!win->transpose_xy) { h_size = win->src_w * bpp; v_size = win->src_h; } else { h_size = win->src_h * bpp; v_size = win->src_w; } - h_offset = win->src_x * bpp;; + h_offset = win->src_x * bpp; v_offset = win->src_y; if (win->flip_x) { h_offset += win->src_w * bpp - 1; } if (win->flip_y) v_offset += win->src_h - 1; /* Adjust offsets for planar yuv modes */ if (win->is_yuv_planar) { h_offset &= ~1; if (win->flip_x ) h_offset |= 1; v_offset &= ~1; if (win->flip_y ) v_offset |= 1; } /* Setup scaling. */ if (!win->transpose_xy) { h_init_dda = dc_scaling_init(win->src_x); v_init_dda = dc_scaling_init(win->src_y); h_incr_dda = dc_scaling_incr(win->src_w, win->dst_w, 4); v_incr_dda = dc_scaling_incr(win->src_h, win->dst_h, 15); } else { h_init_dda = dc_scaling_init(win->src_y); v_init_dda = dc_scaling_init(win->src_x); h_incr_dda = dc_scaling_incr(win->src_h, win->dst_h, 4); v_incr_dda = dc_scaling_incr(win->src_w, win->dst_w, 15); } #ifdef DMR_DEBUG_WINDOW printf("\n"); printf(" bpp: %d, size: h: %d v: %d, offset: h:%d v: %d\n", bpp, h_size, v_size, h_offset, v_offset); printf(" init_dda: h: %d v: %d, incr_dda: h: %d v: %d\n", h_init_dda, v_init_dda, h_incr_dda, v_incr_dda); #endif LOCK(sc); /* Select target window */ val = WINDOW_A_SELECT << index; WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, val); /* Sizes */ WR4(sc, DC_WIN_POSITION, WIN_POSITION(win->dst_x, win->dst_y)); WR4(sc, DC_WIN_SIZE, WIN_SIZE(win->dst_w, win->dst_h)); WR4(sc, DC_WIN_PRESCALED_SIZE, WIN_PRESCALED_SIZE(h_size, v_size)); /* DDA */ WR4(sc, DC_WIN_DDA_INCREMENT, WIN_DDA_INCREMENT(h_incr_dda, v_incr_dda)); WR4(sc, DC_WIN_H_INITIAL_DDA, h_init_dda); WR4(sc, DC_WIN_V_INITIAL_DDA, v_init_dda); /* Color planes base addresses and strides */ WR4(sc, DC_WINBUF_START_ADDR, win->base[0]); if (win->is_yuv_planar) { WR4(sc, DC_WINBUF_START_ADDR_U, win->base[1]); WR4(sc, DC_WINBUF_START_ADDR_V, win->base[2]); WR4(sc, DC_WIN_LINE_STRIDE, win->stride[1] << 16 | 
win->stride[0]); } else { WR4(sc, DC_WIN_LINE_STRIDE, win->stride[0]); } /* Offsets for rotation and axis flip */ WR4(sc, DC_WINBUF_ADDR_H_OFFSET, h_offset); WR4(sc, DC_WINBUF_ADDR_V_OFFSET, v_offset); /* Color format */ WR4(sc, DC_WIN_COLOR_DEPTH, win->color_mode); WR4(sc, DC_WIN_BYTE_SWAP, win->swap); /* Tiling */ val = win->surface_kind; if (win->surface_kind == SURFACE_KIND_BL_16B2) val |= SURFACE_KIND_BLOCK_HEIGHT(win->block_height); WR4(sc, DC_WINBUF_SURFACE_KIND, val); /* Color space coefs for YUV modes */ if (win->is_yuv) { WR4(sc, DC_WINC_CSC_YOF, 0x00f0); WR4(sc, DC_WINC_CSC_KYRGB, 0x012a); WR4(sc, DC_WINC_CSC_KUR, 0x0000); WR4(sc, DC_WINC_CSC_KVR, 0x0198); WR4(sc, DC_WINC_CSC_KUG, 0x039b); WR4(sc, DC_WINC_CSC_KVG, 0x032f); WR4(sc, DC_WINC_CSC_KUB, 0x0204); WR4(sc, DC_WINC_CSC_KVB, 0x0000); } val = WIN_ENABLE; if (win->is_yuv) val |= CSC_ENABLE; else if (win->bits_per_pixel < 24) val |= COLOR_EXPAND; if (win->flip_y) val |= V_DIRECTION; if (win->flip_x) val |= H_DIRECTION; if (win->transpose_xy) val |= SCAN_COLUMN; WR4(sc, DC_WINC_WIN_OPTIONS, val); #ifdef DMR_DEBUG_WINDOW /* Set underflow debug mode -> highlight missing pixels. */ WR4(sc, DC_WINBUF_UFLOW_CTRL, UFLOW_CTR_ENABLE); WR4(sc, DC_WINBUF_UFLOW_DBG_PIXEL, 0xFFFF0000); #endif UNLOCK(sc); } /* ------------------------------------------------------------------- * * Plane functions. 
* */ static int dc_plane_update(struct drm_plane *drm_plane, struct drm_crtc *drm_crtc, struct drm_framebuffer *drm_fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { struct tegra_plane *plane; struct tegra_crtc *crtc; struct tegra_fb *fb; struct dc_softc *sc; struct dc_window win; int rv; plane = container_of(drm_plane, struct tegra_plane, drm_plane); fb = container_of(drm_fb, struct tegra_fb, drm_fb); crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); memset(&win, 0, sizeof(win)); win.src_x = src_x >> 16; win.src_y = src_y >> 16; win.src_w = src_w >> 16; win.src_h = src_h >> 16; win.dst_x = crtc_x; win.dst_y = crtc_y; win.dst_w = crtc_w; win.dst_h = crtc_h; rv = dc_parse_drm_format(fb, &win); if (rv != 0) { DRM_WARNING("unsupported pixel format %d\n", fb->drm_fb.pixel_format); return (rv); } dc_setup_window(sc, plane->index, &win); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_UPDATE << plane->index); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_ACT_REQ << plane->index); return (0); } static int dc_plane_disable(struct drm_plane *drm_plane) { struct tegra_plane *plane; struct tegra_crtc *crtc; struct dc_softc *sc; uint32_t val, idx; if (drm_plane->crtc == NULL) return (0); plane = container_of(drm_plane, struct tegra_plane, drm_plane); crtc = container_of(drm_plane->crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); idx = plane->index; LOCK(sc); WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, WINDOW_A_SELECT << idx); val = RD4(sc, DC_WINC_WIN_OPTIONS); val &= ~WIN_ENABLE; WR4(sc, DC_WINC_WIN_OPTIONS, val); UNLOCK(sc); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_UPDATE << idx); WR4(sc, DC_CMD_STATE_CONTROL, WIN_A_ACT_REQ << idx); return (0); } static void dc_plane_destroy(struct drm_plane *plane) { dc_plane_disable(plane); drm_plane_cleanup(plane); free(plane, DRM_MEM_KMS); } static const struct drm_plane_funcs dc_plane_funcs = { .update_plane = 
dc_plane_update, .disable_plane = dc_plane_disable, .destroy = dc_plane_destroy, }; /* ------------------------------------------------------------------- * * CRTC helper functions. * */ static void dc_crtc_dpms(struct drm_crtc *crtc, int mode) { /* Empty function */ } static bool dc_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted) { return (true); } static int dc_set_base(struct dc_softc *sc, int x, int y, struct tegra_fb *fb) { struct dc_window win; int rv; memset(&win, 0, sizeof(win)); win.src_x = x; win.src_y = y; win.src_w = fb->drm_fb.width; win.src_h = fb->drm_fb.height; win.dst_x = x; win.dst_y = y; win.dst_w = fb->drm_fb.width; win.dst_h = fb->drm_fb.height; rv = dc_parse_drm_format(fb, &win); if (rv != 0) { DRM_WARNING("unsupported pixel format %d\n", fb->drm_fb.pixel_format); return (rv); } dc_setup_window(sc, 0, &win); return (0); } static int dc_crtc_mode_set(struct drm_crtc *drm_crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted, int x, int y, struct drm_framebuffer *old_fb) { struct dc_softc *sc; struct tegra_crtc *crtc; struct tegra_fb *fb; struct dc_window win; uint32_t div, h_ref_to_sync, v_ref_to_sync; int rv; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); h_ref_to_sync = 1; v_ref_to_sync = 1; /* Setup timing */ rv = dc_setup_clk(sc, drm_crtc, mode, &div); if (rv != 0) { device_printf(sc->dev, "Cannot set pixel clock\n"); return (rv); } /* Timing */ WR4(sc, DC_DISP_DISP_TIMING_OPTIONS, 0); WR4(sc, DC_DISP_REF_TO_SYNC, (v_ref_to_sync << 16) | h_ref_to_sync); WR4(sc, DC_DISP_SYNC_WIDTH, ((mode->vsync_end - mode->vsync_start) << 16) | ((mode->hsync_end - mode->hsync_start) << 0)); WR4(sc, DC_DISP_BACK_PORCH, ((mode->vtotal - mode->vsync_end) << 16) | ((mode->htotal - mode->hsync_end) << 0)); WR4(sc, DC_DISP_FRONT_PORCH, ((mode->vsync_start - mode->vdisplay) << 16) | 
((mode->hsync_start - mode->hdisplay) << 0)); WR4(sc, DC_DISP_DISP_ACTIVE, (mode->vdisplay << 16) | mode->hdisplay); WR4(sc, DC_DISP_DISP_INTERFACE_CONTROL, DISP_DATA_FORMAT(DF1P1C)); WR4(sc,DC_DISP_DISP_CLOCK_CONTROL, SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER(PCD1)); memset(&win, 0, sizeof(win)); win.src_x = x; win.src_y = y; win.src_w = mode->hdisplay; win.src_h = mode->vdisplay; win.dst_x = x; win.dst_y = y; win.dst_w = mode->hdisplay; win.dst_h = mode->vdisplay; rv = dc_parse_drm_format(fb, &win); if (rv != 0) { DRM_WARNING("unsupported pixel format %d\n", drm_crtc->fb->pixel_format); return (rv); } dc_setup_window(sc, 0, &win); return (0); } static int dc_crtc_mode_set_base(struct drm_crtc *drm_crtc, int x, int y, struct drm_framebuffer *old_fb) { struct dc_softc *sc; struct tegra_crtc *crtc; struct tegra_fb *fb; int rv; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); sc = device_get_softc(crtc->dev); rv = dc_set_base(sc, x, y, fb); /* Commit */ WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | WIN_A_ACT_REQ); return (rv); } static void dc_crtc_prepare(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); WR4(sc, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL, SYNCPT_CNTRL_NO_STALL); /* XXX allocate syncpoint from host1x */ WR4(sc, DC_CMD_CONT_SYNCPT_VSYNC, SYNCPT_VSYNC_ENABLE | (sc->tegra_crtc.nvidia_head == 0 ? 
SYNCPT_VBLANK0: SYNCPT_VBLANK1)); WR4(sc, DC_CMD_DISPLAY_POWER_CONTROL, PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); val = RD4(sc, DC_CMD_DISPLAY_COMMAND); val |= DISPLAY_CTRL_MODE(CTRL_MODE_C_DISPLAY); WR4(sc, DC_CMD_DISPLAY_COMMAND, val); WR4(sc, DC_CMD_INT_MASK, WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_ENABLE, VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); } static void dc_crtc_commit(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE); val = RD4(sc, DC_CMD_INT_MASK); val |= FRAME_END_INT; WR4(sc, DC_CMD_INT_MASK, val); val = RD4(sc, DC_CMD_INT_ENABLE); val |= FRAME_END_INT; WR4(sc, DC_CMD_INT_ENABLE, val); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | WIN_A_ACT_REQ); } static void dc_crtc_load_lut(struct drm_crtc *crtc) { /* empty function */ } static const struct drm_crtc_helper_funcs dc_crtc_helper_funcs = { .dpms = dc_crtc_dpms, .mode_fixup = dc_crtc_mode_fixup, .mode_set = dc_crtc_mode_set, .mode_set_base = dc_crtc_mode_set_base, .prepare = dc_crtc_prepare, .commit = dc_crtc_commit, .load_lut = dc_crtc_load_lut, }; static int drm_crtc_index(struct drm_crtc *crtc) { int idx; struct drm_crtc *tmp; idx = 0; list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { if (tmp == crtc) return (idx); idx++; } panic("Cannot find CRTC"); } /* ------------------------------------------------------------------- * * Exported functions (mainly vsync related). * * XXX revisit this -> convert to bus methods? 
*/ int tegra_dc_get_pipe(struct drm_crtc *drm_crtc) { struct tegra_crtc *crtc; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); return (crtc->nvidia_head); } void tegra_dc_enable_vblank(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); LOCK(sc); val = RD4(sc, DC_CMD_INT_MASK); val |= VBLANK_INT; WR4(sc, DC_CMD_INT_MASK, val); UNLOCK(sc); } void tegra_dc_disable_vblank(struct drm_crtc *drm_crtc) { struct dc_softc *sc; struct tegra_crtc *crtc; uint32_t val; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); LOCK(sc); val = RD4(sc, DC_CMD_INT_MASK); val &= ~VBLANK_INT; WR4(sc, DC_CMD_INT_MASK, val); UNLOCK(sc); } static void dc_finish_page_flip(struct dc_softc *sc) { struct drm_crtc *drm_crtc; struct drm_device *drm; struct tegra_fb *fb; struct tegra_bo *bo; uint32_t base; int idx; drm_crtc = &sc->tegra_crtc.drm_crtc; drm = drm_crtc->dev; fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); mtx_lock(&drm->event_lock); if (sc->event == NULL) { mtx_unlock(&drm->event_lock); return; } LOCK(sc); /* Read active copy of WINBUF_START_ADDR */ WR4(sc, DC_CMD_DISPLAY_WINDOW_HEADER, WINDOW_A_SELECT); WR4(sc, DC_CMD_STATE_ACCESS, READ_MUX); base = RD4(sc, DC_WINBUF_START_ADDR); WR4(sc, DC_CMD_STATE_ACCESS, 0); UNLOCK(sc); /* Is already active */ bo = tegra_fb_get_plane(fb, 0); if (base == (bo->pbase + fb->drm_fb.offsets[0])) { idx = drm_crtc_index(drm_crtc); drm_send_vblank_event(drm, idx, sc->event); drm_vblank_put(drm, idx); sc->event = NULL; } mtx_unlock(&drm->event_lock); } void tegra_dc_cancel_page_flip(struct drm_crtc *drm_crtc, struct drm_file *file) { struct dc_softc *sc; struct tegra_crtc *crtc; struct drm_device *drm; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); drm = drm_crtc->dev; mtx_lock(&drm->event_lock); if ((sc->event != NULL) 
&& (sc->event->base.file_priv == file)) { sc->event->base.destroy(&sc->event->base); drm_vblank_put(drm, drm_crtc_index(drm_crtc)); sc->event = NULL; } mtx_unlock(&drm->event_lock); } /* ------------------------------------------------------------------- * * CRTC functions. * */ static int dc_page_flip(struct drm_crtc *drm_crtc, struct drm_framebuffer *drm_fb, struct drm_pending_vblank_event *event) { struct dc_softc *sc; struct tegra_crtc *crtc; struct tegra_fb *fb; struct drm_device *drm; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); fb = container_of(drm_crtc->fb, struct tegra_fb, drm_fb); drm = drm_crtc->dev; if (sc->event != NULL) return (-EBUSY); if (event != NULL) { event->pipe = sc->tegra_crtc.nvidia_head; sc->event = event; drm_vblank_get(drm, event->pipe); } dc_set_base(sc, drm_crtc->x, drm_crtc->y, fb); drm_crtc->fb = drm_fb; /* Commit */ WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | WIN_A_UPDATE); return (0); } static int dc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file, uint32_t handle, uint32_t width, uint32_t height) { struct dc_softc *sc; struct tegra_crtc *crtc; struct drm_gem_object *gem; struct tegra_bo *bo; int i; uint32_t val, *src, *dst; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); if (width != height) return (-EINVAL); switch (width) { case 32: val = CURSOR_SIZE(C32x32); break; case 64: val = CURSOR_SIZE(C64x64); break; case 128: val = CURSOR_SIZE(C128x128); break; case 256: val = CURSOR_SIZE(C256x256); break; default: return (-EINVAL); } bo = NULL; gem = NULL; if (handle != 0) { gem = drm_gem_object_lookup(drm_crtc->dev, file, handle); if (gem == NULL) return (-ENOENT); bo = container_of(gem, struct tegra_bo, gem_obj); } if (sc->cursor_gem != NULL) { drm_gem_object_unreference(sc->cursor_gem); } sc->cursor_gem = gem; if (bo != NULL) { /* * Copy cursor into cache and convert it from ARGB to RGBA. 
* XXXX - this is broken by design - client can write to BO at * any time. We can dedicate other window for cursor or switch * to sw cursor in worst case. */ src = (uint32_t *)bo->vbase; dst = (uint32_t *)crtc->cursor_vbase; for (i = 0; i < width * height; i++) dst[i] = (src[i] << 8) | (src[i] >> 24); val |= CURSOR_CLIP(CC_DISPLAY); val |= CURSOR_START_ADDR(crtc->cursor_pbase); WR4(sc, DC_DISP_CURSOR_START_ADDR, val); val = RD4(sc, DC_DISP_BLEND_CURSOR_CONTROL); val &= ~CURSOR_DST_BLEND_FACTOR_SELECT(~0); val &= ~CURSOR_SRC_BLEND_FACTOR_SELECT(~0); val |= CURSOR_MODE_SELECT; val |= CURSOR_DST_BLEND_FACTOR_SELECT(DST_NEG_K1_TIMES_SRC); val |= CURSOR_SRC_BLEND_FACTOR_SELECT(SRC_BLEND_K1_TIMES_SRC); val |= CURSOR_ALPHA(~0); WR4(sc, DC_DISP_BLEND_CURSOR_CONTROL, val); val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS); val |= CURSOR_ENABLE; WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val); } else { val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS); val &= ~CURSOR_ENABLE; WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val); } /* XXX This fixes cursor underflow issues, but why ? 
*/ WR4(sc, DC_DISP_CURSOR_UNDERFLOW_CTRL, CURSOR_UFLOW_CYA); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE | CURSOR_UPDATE ); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ | CURSOR_ACT_REQ); return (0); } static int dc_cursor_move(struct drm_crtc *drm_crtc, int x, int y) { struct dc_softc *sc; struct tegra_crtc *crtc; crtc = container_of(drm_crtc, struct tegra_crtc, drm_crtc); sc = device_get_softc(crtc->dev); WR4(sc, DC_DISP_CURSOR_POSITION, CURSOR_POSITION(x, y)); WR4(sc, DC_CMD_STATE_CONTROL, CURSOR_UPDATE); WR4(sc, DC_CMD_STATE_CONTROL, CURSOR_ACT_REQ); return (0); } static void dc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); memset(crtc, 0, sizeof(*crtc)); } static const struct drm_crtc_funcs dc_crtc_funcs = { .page_flip = dc_page_flip, .cursor_set = dc_cursor_set, .cursor_move = dc_cursor_move, .set_config = drm_crtc_helper_set_config, .destroy = dc_destroy, }; /* ------------------------------------------------------------------- * * Bus and infrastructure. * */ static int dc_init_planes(struct dc_softc *sc, struct tegra_drm *drm) { int i, rv; struct tegra_plane *plane; rv = 0; for (i = 0; i < DC_MAX_PLANES; i++) { plane = malloc(sizeof(*plane), DRM_MEM_KMS, M_WAITOK | M_ZERO); plane->index = i + 1; rv = drm_plane_init(&drm->drm_dev, &plane->drm_plane, 1 << sc->tegra_crtc.nvidia_head, &dc_plane_funcs, dc_plane_formats, nitems(dc_plane_formats), false); if (rv != 0) { free(plane, DRM_MEM_KMS); return (rv); } } return 0; } static void dc_display_enable(device_t dev, bool enable) { struct dc_softc *sc; uint32_t val; sc = device_get_softc(dev); /* Set display mode */ val = enable ? 
CTRL_MODE_C_DISPLAY: CTRL_MODE_STOP; WR4(sc, DC_CMD_DISPLAY_COMMAND, DISPLAY_CTRL_MODE(val)); /* and commit it*/ WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_UPDATE); WR4(sc, DC_CMD_STATE_CONTROL, GENERAL_ACT_REQ); } static void dc_hdmi_enable(device_t dev, bool enable) { struct dc_softc *sc; uint32_t val; sc = device_get_softc(dev); val = RD4(sc, DC_DISP_DISP_WIN_OPTIONS); if (enable) val |= HDMI_ENABLE; else val &= ~HDMI_ENABLE; WR4(sc, DC_DISP_DISP_WIN_OPTIONS, val); } static void dc_setup_timing(device_t dev, int h_pulse_start) { struct dc_softc *sc; sc = device_get_softc(dev); /* Setup display timing */ WR4(sc, DC_DISP_DISP_TIMING_OPTIONS, VSYNC_H_POSITION(1)); WR4(sc, DC_DISP_DISP_COLOR_CONTROL, DITHER_CONTROL(DITHER_DISABLE) | BASE_COLOR_SIZE(SIZE_BASE888)); WR4(sc, DC_DISP_DISP_SIGNAL_OPTIONS0, H_PULSE2_ENABLE); WR4(sc, DC_DISP_H_PULSE2_CONTROL, PULSE_CONTROL_QUAL(QUAL_VACTIVE) | PULSE_CONTROL_LAST(LAST_END_A)); WR4(sc, DC_DISP_H_PULSE2_POSITION_A, PULSE_START(h_pulse_start) | PULSE_END(h_pulse_start + 8)); } static void dc_intr(void *arg) { struct dc_softc *sc; uint32_t status; sc = arg; /* Confirm interrupt */ status = RD4(sc, DC_CMD_INT_STATUS); WR4(sc, DC_CMD_INT_STATUS, status); if (status & VBLANK_INT) { drm_handle_vblank(sc->tegra_crtc.drm_crtc.dev, sc->tegra_crtc.nvidia_head); dc_finish_page_flip(sc); } } static int dc_init_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct dc_softc *sc; int rv; sc = device_get_softc(dev); if (drm->pitch_align < sc->pitch_align) drm->pitch_align = sc->pitch_align; drm_crtc_init(&drm->drm_dev, &sc->tegra_crtc.drm_crtc, &dc_crtc_funcs); drm_mode_crtc_set_gamma_size(&sc->tegra_crtc.drm_crtc, 256); drm_crtc_helper_add(&sc->tegra_crtc.drm_crtc, &dc_crtc_helper_funcs); rv = dc_init_planes(sc, drm); if (rv!= 0){ device_printf(dev, "Cannot init planes\n"); return (rv); } WR4(sc, DC_CMD_INT_TYPE, WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_POLARITY, 
WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT); WR4(sc, DC_CMD_INT_ENABLE, 0); WR4(sc, DC_CMD_INT_MASK, 0); rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, dc_intr, sc, &sc->irq_ih); if (rv != 0) { device_printf(dev, "Cannot register interrupt handler\n"); return (rv); } /* allocate memory for cursor cache */ sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4, M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING); sc->tegra_crtc.cursor_pbase = vtophys(sc->tegra_crtc.cursor_vbase); return (0); } static int dc_exit_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct dc_softc *sc; sc = device_get_softc(dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); sc->irq_ih = NULL; return (0); } static int get_fdt_resources(struct dc_softc *sc, phandle_t node) { int rv; rv = hwreset_get_by_ofw_name(sc->dev, 0, "dc", &sc->hwreset_dc); if (rv != 0) { device_printf(sc->dev, "Cannot get 'dc' reset\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "parent", &sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot get 'parent' clock\n"); return (rv); } rv = clk_get_by_ofw_name(sc->dev, 0, "dc", &sc->clk_dc); if (rv != 0) { device_printf(sc->dev, "Cannot get 'dc' clock\n"); return (rv); } rv = OF_getencprop(node, "nvidia,head", &sc->tegra_crtc.nvidia_head, sizeof(sc->tegra_crtc.nvidia_head)); if (rv <= 0) { device_printf(sc->dev, "Cannot get 'nvidia,head' property\n"); return (rv); } return (0); } static int enable_fdt_resources(struct dc_softc *sc) { int id, rv; rv = clk_set_parent_by_clk(sc->clk_dc, sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for 'dc' clock\n"); return (rv); } id = (sc->tegra_crtc.nvidia_head == 0) ? 
TEGRA_POWERGATE_DIS: TEGRA_POWERGATE_DISB; rv = tegra_powergate_sequence_power_up(id, sc->clk_dc, sc->hwreset_dc); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'DIS' powergate\n"); return (rv); } return (0); } static int dc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra Display Controller"); return (BUS_PROBE_DEFAULT); } static int dc_attach(device_t dev) { struct dc_softc *sc; phandle_t node; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->tegra_crtc.dev = dev; node = ofw_bus_get_node(sc->dev); LOCK_INIT(sc); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot parse FDT resources\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(dev, "Cannot enable FDT resources\n"); goto fail; } /* * Tegra124 * - 64 for RGB modes * - 128 for YUV planar modes * - 256 for block linear modes */ sc->pitch_align = 256; rv = TEGRA_DRM_REGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (rv != 0) { device_printf(dev, "Cannot register DRM device\n"); goto fail; } return (bus_generic_attach(dev)); fail: TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_dc != NULL) clk_release(sc->clk_dc); if (sc->hwreset_dc != NULL) hwreset_release(sc->hwreset_dc); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, 
SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (ENXIO); } static int dc_detach(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_dc != NULL) clk_release(sc->clk_dc); if (sc->hwreset_dc != NULL) hwreset_release(sc->hwreset_dc); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); LOCK_DESTROY(sc); return (bus_generic_detach(dev)); } static device_method_t tegra_dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), /* tegra drm interface */ DEVMETHOD(tegra_drm_init_client, dc_init_client), DEVMETHOD(tegra_drm_exit_client, dc_exit_client), /* tegra dc interface */ DEVMETHOD(tegra_dc_display_enable, dc_display_enable), DEVMETHOD(tegra_dc_hdmi_enable, dc_hdmi_enable), DEVMETHOD(tegra_dc_setup_timing, dc_setup_timing), DEVMETHOD_END }; static devclass_t tegra_dc_devclass; DEFINE_CLASS_0(tegra_dc, tegra_dc_driver, tegra_dc_methods, sizeof(struct dc_softc)); DRIVER_MODULE(tegra_dc, host1x, tegra_dc_driver, tegra_dc_devclass, NULL, NULL); Index: head/sys/arm/nvidia/drm2/tegra_hdmi.c =================================================================== --- head/sys/arm/nvidia/drm2/tegra_hdmi.c (revision 359440) +++ head/sys/arm/nvidia/drm2/tegra_hdmi.c (revision 359441) @@ -1,1326 +1,1326 @@ /*- * Copyright (c) 2015 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "tegra_dc_if.h" #include "tegra_drm_if.h" #define WR4(_sc, _r, _v) bus_write_4((_sc)->mem_res, 4 * (_r), (_v)) #define RD4(_sc, _r) bus_read_4((_sc)->mem_res, 4 * (_r)) /* HDA stream format verb. 
*/ #define AC_FMT_CHAN_GET(x) (((x) >> 0) & 0xf) #define AC_FMT_CHAN_BITS_GET(x) (((x) >> 4) & 0x7) #define AC_FMT_DIV_GET(x) (((x) >> 8) & 0x7) #define AC_FMT_MUL_GET(x) (((x) >> 11) & 0x7) #define AC_FMT_BASE_44K (1 << 14) #define AC_FMT_TYPE_NON_PCM (1 << 15) #define HDMI_REKEY_DEFAULT 56 #define HDMI_ELD_BUFFER_SIZE 96 #define HDMI_DC_CLOCK_MULTIPIER 2 struct audio_reg { uint32_t audio_clk; bus_size_t acr_reg; bus_size_t nval_reg; bus_size_t aval_reg; }; static const struct audio_reg audio_regs[] = { { .audio_clk = 32000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0320, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320, }, { .audio_clk = 44100, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0441, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441, }, { .audio_clk = 88200, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0882, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882, }, { .audio_clk = 176400, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_1764, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764, }, { .audio_clk = 48000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0480, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480, }, { .audio_clk = 96000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_0960, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960, }, { .audio_clk = 192000, .acr_reg = HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW, .nval_reg = HDMI_NV_PDISP_SOR_AUDIO_NVAL_1920, .aval_reg = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920, }, }; struct tmds_config { uint32_t pclk; uint32_t pll0; uint32_t pll1; uint32_t drive_c; uint32_t pe_c; uint32_t peak_c; uint32_t pad_ctls; }; static const struct tmds_config tegra124_tmds_config[] = { { /* 480p/576p / 25.2MHz/27MHz */ .pclk = 27000000, .pll0 = 0x01003010, .pll1 = 
0x00301B00, .drive_c = 0x1F1F1F1F, .pe_c = 0x00000000, .peak_c = 0x03030303, .pad_ctls = 0x800034BB, }, { /* 720p/1080i / 74.25MHz */ .pclk = 74250000, .pll0 = 0x01003110, .pll1 = 0x00301500, .drive_c = 0x2C2C2C2C, .pe_c = 0x00000000, .peak_c = 0x07070707, .pad_ctls = 0x800034BB, }, { /* 1080p / 148.5MHz */ .pclk = 148500000, .pll0 = 0x01003310, .pll1 = 0x00301500, .drive_c = 0x33333333, .pe_c = 0x00000000, .peak_c = 0x0C0C0C0C, .pad_ctls = 0x800034BB, }, { /* 2216p / 297MHz */ .pclk = UINT_MAX, .pll0 = 0x01003F10, .pll1 = 0x00300F00, .drive_c = 0x37373737, .pe_c = 0x00000000, .peak_c = 0x17171717, .pad_ctls = 0x800036BB, }, }; struct hdmi_softc { device_t dev; struct resource *mem_res; struct resource *irq_res; void *irq_ih; clk_t clk_parent; clk_t clk_hdmi; hwreset_t hwreset_hdmi; regulator_t supply_hdmi; regulator_t supply_pll; regulator_t supply_vdd; uint64_t pclk; boolean_t hdmi_mode; int audio_src_type; int audio_freq; int audio_chans; struct tegra_drm *drm; struct tegra_drm_encoder output; const struct tmds_config *tmds_config; int n_tmds_configs; }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-hdmi", 1}, {NULL, 0}, }; /* These functions have been copied from newer version of drm_edid.c */ /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 #define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ static int drm_eld_size(const uint8_t *eld) { return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; } static int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, struct drm_display_mode *mode) { int rv; if (!frame || !mode) return -EINVAL; rv = hdmi_avi_infoframe_init(frame); if (rv < 0) return rv; if (mode->flags & DRM_MODE_FLAG_DBLCLK) frame->pixel_repeat = 1; frame->video_code = drm_match_cea_mode(mode); frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; #ifdef FREEBSD_NOTYET /* * Populate picture aspect ratio from either * user input (if specified) or from the CEA mode list. 
*/ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 || mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) frame->picture_aspect = mode->picture_aspect_ratio; else if (frame->video_code > 0) frame->picture_aspect = drm_get_cea_aspect_ratio( frame->video_code); #endif frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; return 0; } /* --------------------------------------------------------------------- */ static int hdmi_setup_clock(struct tegra_drm_encoder *output, clk_t clk, uint64_t pclk) { struct hdmi_softc *sc; uint64_t freq; int rv; sc = device_get_softc(output->dev); /* Disable consumers clock for while. */ rv = clk_disable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot disable 'hdmi' clock\n"); return (rv); } rv = clk_disable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot disable display clock\n"); return (rv); } /* Set frequency for Display Controller PLL. */ freq = HDMI_DC_CLOCK_MULTIPIER * pclk; rv = clk_set_freq(sc->clk_parent, freq, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display pixel frequency\n"); return (rv); } /* Reparent display controller */ rv = clk_set_parent_by_clk(clk, sc->clk_parent); if (rv != 0) { device_printf(output->dev, "Cannot set parent clock\n"); return (rv); } rv = clk_set_freq(clk, freq, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display controller frequency\n"); return (rv); } rv = clk_set_freq(sc->clk_hdmi, pclk, 0); if (rv != 0) { device_printf(output->dev, "Cannot set display controller frequency\n"); return (rv); } /* And reenable consumers clock. 
*/ rv = clk_enable(clk); if (rv != 0) { device_printf(sc->dev, "Cannot enable display clock\n"); return (rv); } rv = clk_enable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' clock\n"); return (rv); } rv = clk_get_freq(clk, &freq); if (rv != 0) { device_printf(output->dev, "Cannot get display controller frequency\n"); return (rv); } DRM_DEBUG_KMS("DC frequency: %llu\n", freq); return (0); } /* ------------------------------------------------------------------- * * Infoframes. * */ static void avi_setup_infoframe(struct hdmi_softc *sc, struct drm_display_mode *mode) { struct hdmi_avi_infoframe frame; - uint8_t buf[17], *hdr, *pb;; + uint8_t buf[17], *hdr, *pb; ssize_t rv; rv = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (rv < 0) { device_printf(sc->dev, "Cannot setup AVI infoframe: %zd\n", rv); return; } rv = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf)); if (rv < 0) { device_printf(sc->dev, "Cannot pack AVI infoframe: %zd\n", rv); return; } hdr = buf + 0; pb = buf + 3; WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, (hdr[2] << 16) | (hdr[1] << 8) | (hdr[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, (pb[3] << 24) |(pb[2] << 16) | (pb[1] << 8) | (pb[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, (pb[6] << 16) | (pb[5] << 8) | (pb[4] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, (pb[10] << 24) |(pb[9] << 16) | (pb[8] << 8) | (pb[7] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, (pb[13] << 16) | (pb[12] << 8) | (pb[11] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL, AVI_INFOFRAME_CTRL_ENABLE); } static void audio_setup_infoframe(struct hdmi_softc *sc) { struct hdmi_audio_infoframe frame; uint8_t buf[14], *hdr, *pb; ssize_t rv; rv = hdmi_audio_infoframe_init(&frame); frame.channels = sc->audio_chans; rv = hdmi_audio_infoframe_pack(&frame, buf, sizeof(buf)); if (rv < 0) { device_printf(sc->dev, "Cannot pack audio infoframe\n"); return; } hdr = 
buf + 0; pb = buf + 3; WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, (hdr[2] << 16) | (hdr[1] << 8) | (hdr[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW, (pb[3] << 24) |(pb[2] << 16) | (pb[1] << 8) | (pb[0] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH, (pb[5] << 8) | (pb[4] << 0)); WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, AUDIO_INFOFRAME_CTRL_ENABLE); } /* ------------------------------------------------------------------- * * Audio * */ static void init_hda_eld(struct hdmi_softc *sc) { size_t size; int i ; uint32_t val; size = drm_eld_size(sc->output.connector.eld); for (i = 0; i < HDMI_ELD_BUFFER_SIZE; i++) { val = i << 8; if (i < size) val |= sc->output.connector.eld[i]; WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR, val); } WR4(sc,HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE, SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT); } static int get_audio_regs(int freq, bus_size_t *acr_reg, bus_size_t *nval_reg, bus_size_t *aval_reg) { int i; const struct audio_reg *reg; for (i = 0; i < nitems(audio_regs) ; i++) { reg = audio_regs + i; if (reg->audio_clk == freq) { if (acr_reg != NULL) *acr_reg = reg->acr_reg; if (nval_reg != NULL) *nval_reg = reg->nval_reg; if (aval_reg != NULL) *aval_reg = reg->aval_reg; return (0); } } return (ERANGE); } #define FR_BITS 16 #define TO_FFP(x) (((int64_t)(x)) << FR_BITS) #define TO_INT(x) ((int)((x) >> FR_BITS)) static int get_hda_cts_n(uint32_t audio_freq_hz, uint32_t pixclk_freq_hz, uint32_t *best_cts, uint32_t *best_n, uint32_t *best_a) { int min_n; int max_n; int ideal_n; int n; int cts; int aval; int64_t err_f; int64_t min_err_f; int64_t cts_f; int64_t aval_f; int64_t half_f; /* constant 0.5 */ bool better_n; /* * All floats are in fixed I48.16 format. * * Ideal ACR interval is 1000 hz (1 ms); * acceptable is 300 hz .. 
1500 hz */ min_n = 128 * audio_freq_hz / 1500; max_n = 128 * audio_freq_hz / 300; ideal_n = 128 * audio_freq_hz / 1000; min_err_f = TO_FFP(100); half_f = TO_FFP(1) / 2; *best_n = 0; *best_cts = 0; *best_a = 0; for (n = min_n; n <= max_n; n++) { cts_f = TO_FFP(pixclk_freq_hz); cts_f *= n; cts_f /= 128 * audio_freq_hz; cts = TO_INT(cts_f + half_f); /* round */ err_f = cts_f - TO_FFP(cts); if (err_f < 0) err_f = -err_f; aval_f = TO_FFP(24000000); aval_f *= n; aval_f /= 128 * audio_freq_hz; aval = TO_INT(aval_f); /* truncate */ better_n = abs(n - ideal_n) < abs((int)(*best_n) - ideal_n); if (TO_FFP(aval) == aval_f && (err_f < min_err_f || (err_f == min_err_f && better_n))) { min_err_f = err_f; *best_n = (uint32_t)n; *best_cts = (uint32_t)cts; *best_a = (uint32_t)aval; if (err_f == 0 && n == ideal_n) break; } } return (0); } #undef FR_BITS #undef TO_FFP #undef TO_INT static int audio_setup(struct hdmi_softc *sc) { uint32_t val; uint32_t audio_n; uint32_t audio_cts; uint32_t audio_aval; uint64_t hdmi_freq; bus_size_t aval_reg; int rv; if (!sc->hdmi_mode) return (ENOTSUP); rv = get_audio_regs(sc->audio_freq, NULL, NULL, &aval_reg); if (rv != 0) { device_printf(sc->dev, "Unsupported audio frequency.\n"); return (rv); } rv = clk_get_freq(sc->clk_hdmi, &hdmi_freq); if (rv != 0) { device_printf(sc->dev, "Cannot get hdmi frequency: %d\n", rv); return (rv); } rv = get_hda_cts_n(sc->audio_freq, hdmi_freq, &audio_cts, &audio_n, &audio_aval); if (rv != 0) { device_printf(sc->dev, "Cannot compute audio coefs: %d\n", rv); return (rv); } /* Audio infoframe. 
*/
	audio_setup_infoframe(sc);

	/* Setup audio source */
	WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0,
	    SOR_AUDIO_CNTRL0_SOURCE_SELECT(sc->audio_src_type) |
	    SOR_AUDIO_CNTRL0_INJECT_NULLSMPL);

	/* Enable High Bit Rate audio pass-through in the SOR. */
	val = RD4(sc, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
	val |= SOR_AUDIO_SPARE0_HBR_ENABLE;
	WR4(sc, HDMI_NV_PDISP_SOR_AUDIO_SPARE0, val);

	WR4(sc, HDMI_NV_PDISP_HDMI_ACR_CTRL, 0);

	/*
	 * Program audio N divider; RESETF holds the generator in reset
	 * while the CTS parameters are loaded below.
	 */
	WR4(sc, HDMI_NV_PDISP_AUDIO_N, AUDIO_N_RESETF |
	    AUDIO_N_GENERATE_ALTERNATE | AUDIO_N_VALUE(audio_n - 1));

	/* Load N/CTS pair for the Audio Clock Regeneration packet. */
	WR4(sc, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH,
	    ACR_SUBPACK_N(audio_n) | ACR_ENABLE);
	WR4(sc, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW,
	    ACR_SUBPACK_CTS(audio_cts));

	WR4(sc, HDMI_NV_PDISP_HDMI_SPARE, SPARE_HW_CTS | SPARE_FORCE_SW_CTS |
	    SPARE_CTS_RESET_VAL(1));

	/* Release the audio N generator from reset. */
	val = RD4(sc, HDMI_NV_PDISP_AUDIO_N);
	val &= ~AUDIO_N_RESETF;
	WR4(sc, HDMI_NV_PDISP_AUDIO_N, val);

	WR4(sc, aval_reg, audio_aval);

	return (0);
}

/*
 * Disable the HDMI audio stream and its audio infoframes.
 */
static void
audio_disable(struct hdmi_softc *sc) {
	uint32_t val;

	/* Disable audio */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	val &= ~GENERIC_CTRL_AUDIO;
	WR4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL, val);

	/* Disable audio infoframes */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	val &= ~AUDIO_INFOFRAME_CTRL_ENABLE;
	WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, val);
}

/*
 * Enable the HDMI audio stream and its audio infoframes.
 */
static void
audio_enable(struct hdmi_softc *sc) {
	uint32_t val;

	/*
	 * NOTE(review): when not in HDMI mode this calls audio_disable()
	 * but then falls through and re-enables audio below; an early
	 * return after audio_disable() looks intended here -- confirm
	 * before changing.
	 */
	if (!sc->hdmi_mode)
		audio_disable(sc);

	/* Enable audio infoframes */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	val |= AUDIO_INFOFRAME_CTRL_ENABLE;
	WR4(sc, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL, val);

	/* Enable audio */
	val = RD4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	val |= GENERIC_CTRL_AUDIO;
	WR4(sc, HDMI_NV_PDISP_HDMI_GENERIC_CTRL, val);
}

/* -------------------------------------------------------------------
 *
 * HDMI.
* */ /* Process format change notification from HDA */ static void hda_intr(struct hdmi_softc *sc) { uint32_t val; int rv; if (!sc->hdmi_mode) return; val = RD4(sc, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0); if ((val & (1 << 30)) == 0) { audio_disable(sc); return; } /* XXX Move this to any header */ /* Keep in sync with HDA */ sc->audio_freq = val & 0x00FFFFFF; sc->audio_chans = (val >> 24) & 0x0f; DRM_DEBUG_KMS("%d channel(s) at %dHz\n", sc->audio_chans, sc->audio_freq); rv = audio_setup(sc); if (rv != 0) { audio_disable(sc); return; } audio_enable(sc); } static void tmds_init(struct hdmi_softc *sc, const struct tmds_config *tmds) { WR4(sc, HDMI_NV_PDISP_SOR_PLL0, tmds->pll0); WR4(sc, HDMI_NV_PDISP_SOR_PLL1, tmds->pll1); WR4(sc, HDMI_NV_PDISP_PE_CURRENT, tmds->pe_c); WR4(sc, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, tmds->drive_c); WR4(sc, HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT, tmds->peak_c); WR4(sc, HDMI_NV_PDISP_SOR_PAD_CTLS0, tmds->pad_ctls); } static int hdmi_sor_start(struct hdmi_softc *sc, struct drm_display_mode *mode) { int i; uint32_t val; /* Enable TMDS macro */ val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PWR; val &= ~SOR_PLL0_VCOPD; val &= ~SOR_PLL0_PULLDOWN; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); DELAY(10); val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PDBG; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); WR4(sc, HDMI_NV_PDISP_SOR_PWR, SOR_PWR_SETTING_NEW); WR4(sc, HDMI_NV_PDISP_SOR_PWR, 0); /* Wait until SOR is ready */ for (i = 1000; i > 0; i--) { val = RD4(sc, HDMI_NV_PDISP_SOR_PWR); if ((val & SOR_PWR_SETTING_NEW) == 0) break; DELAY(10); } if (i <= 0) { device_printf(sc->dev, "Timeouted while enabling SOR power.\n"); return (ETIMEDOUT); } val = SOR_STATE2_ASY_OWNER(ASY_OWNER_HEAD0) | SOR_STATE2_ASY_SUBOWNER(SUBOWNER_BOTH) | SOR_STATE2_ASY_CRCMODE(ASY_CRCMODE_COMPLETE) | SOR_STATE2_ASY_PROTOCOL(ASY_PROTOCOL_SINGLE_TMDS_A); if (mode->flags & DRM_MODE_FLAG_NHSYNC) val |= SOR_STATE2_ASY_HSYNCPOL_NEG; if (mode->flags & DRM_MODE_FLAG_NVSYNC) val |= 
SOR_STATE2_ASY_VSYNCPOL_NEG; WR4(sc, HDMI_NV_PDISP_SOR_STATE2, val); WR4(sc, HDMI_NV_PDISP_SOR_STATE1, SOR_STATE1_ASY_ORMODE_NORMAL | SOR_STATE1_ASY_HEAD_OPMODE(ASY_HEAD_OPMODE_AWAKE)); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, 0); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, SOR_STATE0_UPDATE); val = RD4(sc, HDMI_NV_PDISP_SOR_STATE1); val |= SOR_STATE1_ATTACHED; WR4(sc, HDMI_NV_PDISP_SOR_STATE1, val); WR4(sc, HDMI_NV_PDISP_SOR_STATE0, 0); return 0; } static int hdmi_disable(struct hdmi_softc *sc) { struct tegra_crtc *crtc; device_t dc; uint32_t val; dc = NULL; if (sc->output.encoder.crtc != NULL) { crtc = container_of(sc->output.encoder.crtc, struct tegra_crtc, drm_crtc); dc = crtc->dev; } if (dc != NULL) { TEGRA_DC_HDMI_ENABLE(dc, false); TEGRA_DC_DISPLAY_ENABLE(dc, false); } audio_disable(sc); val = RD4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); val &= ~AVI_INFOFRAME_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL, val); /* Disable interrupts */ WR4(sc, HDMI_NV_PDISP_INT_ENABLE, 0); WR4(sc, HDMI_NV_PDISP_INT_MASK, 0); return (0); } static int hdmi_enable(struct hdmi_softc *sc) { uint64_t freq; struct drm_display_mode *mode; struct tegra_crtc *crtc; uint32_t val, h_sync_width, h_back_porch, h_front_porch, h_pulse_start; uint32_t h_max_ac_packet, div8_2; device_t dc; int i, rv; mode = &sc->output.encoder.crtc->mode; crtc = container_of(sc->output.encoder.crtc, struct tegra_crtc, drm_crtc); dc = crtc->dev; /* Compute all timings first. */ sc->pclk = mode->clock * 1000; h_sync_width = mode->hsync_end - mode->hsync_start; h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; h_pulse_start = 1 + h_sync_width + h_back_porch - 10; h_max_ac_packet = (h_sync_width + h_back_porch + h_front_porch - HDMI_REKEY_DEFAULT - 18) / 32; /* Check if HDMI device is connected and detected. 
*/ if (sc->output.connector.edid_blob_ptr == NULL) { sc->hdmi_mode = false; } else { sc->hdmi_mode = drm_detect_hdmi_monitor( (struct edid *)sc->output.connector.edid_blob_ptr->data); } /* Get exact HDMI pixel frequency. */ rv = clk_get_freq(sc->clk_hdmi, &freq); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' clock frequency\n"); return (rv); } DRM_DEBUG_KMS("HDMI frequency: %llu Hz\n", freq); /* Wakeup SOR power */ val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PDBG; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); DELAY(10); val = RD4(sc, HDMI_NV_PDISP_SOR_PLL0); val &= ~SOR_PLL0_PWR; WR4(sc, HDMI_NV_PDISP_SOR_PLL0, val); /* Setup timings */ TEGRA_DC_SETUP_TIMING(dc, h_pulse_start); WR4(sc, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW, VSYNC_WINDOW_START(0x200) | VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_ENABLE); /* Setup video source and adjust video range */ val = 0; if (crtc->nvidia_head != 0) HDMI_SRC_DISPLAYB; if ((mode->hdisplay != 640) || (mode->vdisplay != 480)) val |= ARM_VIDEO_RANGE_LIMITED; WR4(sc, HDMI_NV_PDISP_INPUT_CONTROL, val); /* Program SOR reference clock - it uses 8.2 fractional divisor */ div8_2 = (freq * 4) / 1000000; val = SOR_REFCLK_DIV_INT(div8_2 >> 2) | SOR_REFCLK_DIV_FRAC(div8_2); WR4(sc, HDMI_NV_PDISP_SOR_REFCLK, val); /* Setup audio */ if (sc->hdmi_mode) { rv = audio_setup(sc); if (rv != 0) sc->hdmi_mode = false; } /* Init HDA ELD */ init_hda_eld(sc); val = HDMI_CTRL_REKEY(HDMI_REKEY_DEFAULT); val |= HDMI_CTRL_MAX_AC_PACKET(h_max_ac_packet); if (sc->hdmi_mode) val |= HDMI_CTRL_ENABLE; WR4(sc, HDMI_NV_PDISP_HDMI_CTRL, val); /* Setup TMDS */ for (i = 0; i < sc->n_tmds_configs; i++) { if (sc->pclk <= sc->tmds_config[i].pclk) { tmds_init(sc, sc->tmds_config + i); break; } } /* Program sequencer. 
*/ WR4(sc, HDMI_NV_PDISP_SOR_SEQ_CTL, SOR_SEQ_PU_PC(0) | SOR_SEQ_PU_PC_ALT(0) | SOR_SEQ_PD_PC(8) | SOR_SEQ_PD_PC_ALT(8)); val = SOR_SEQ_INST_WAIT_TIME(1) | SOR_SEQ_INST_WAIT_UNITS(WAIT_UNITS_VSYNC) | SOR_SEQ_INST_HALT | SOR_SEQ_INST_DRIVE_PWM_OUT_LO; WR4(sc, HDMI_NV_PDISP_SOR_SEQ_INST(0), val); WR4(sc, HDMI_NV_PDISP_SOR_SEQ_INST(8), val); val = RD4(sc,HDMI_NV_PDISP_SOR_CSTM); val &= ~SOR_CSTM_LVDS_ENABLE; val &= ~SOR_CSTM_ROTCLK(~0); val |= SOR_CSTM_ROTCLK(2); val &= ~SOR_CSTM_MODE(~0); val |= SOR_CSTM_MODE(CSTM_MODE_TMDS); val |= SOR_CSTM_PLLDIV; WR4(sc, HDMI_NV_PDISP_SOR_CSTM, val); TEGRA_DC_DISPLAY_ENABLE(dc, false); rv = hdmi_sor_start(sc, mode); if (rv != 0) return (rv); TEGRA_DC_HDMI_ENABLE(dc, true); TEGRA_DC_DISPLAY_ENABLE(dc, true); /* Enable HDA codec interrupt */ WR4(sc, HDMI_NV_PDISP_INT_MASK, INT_CODEC_SCRATCH0); WR4(sc, HDMI_NV_PDISP_INT_ENABLE, INT_CODEC_SCRATCH0); if (sc->hdmi_mode) { avi_setup_infoframe(sc, mode); audio_enable(sc); } return (0); } /* ------------------------------------------------------------------- * * DRM Interface. 
* */

/*
 * Validate a display mode by checking that the parent PLL can produce
 * the required pixel clock (including the 2x DC clock multiplier).
 */
static enum drm_mode_status
hdmi_connector_mode_valid(struct drm_connector *connector,
    struct drm_display_mode *mode)
{
	struct tegra_drm_encoder *output;
	struct hdmi_softc *sc;
	int rv;
	uint64_t freq;

	output = container_of(connector, struct tegra_drm_encoder,
	    connector);
	sc = device_get_softc(output->dev);

	freq = HDMI_DC_CLOCK_MULTIPIER * mode->clock * 1000;
	rv = clk_test_freq(sc->clk_parent, freq, 0);
	DRM_DEBUG_KMS("Test HDMI frequency: %u kHz, rv: %d\n", mode->clock,
	    rv);
	if (rv != 0)
		return (MODE_NOCLOCK);
	return (MODE_OK);
}

static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
	.get_modes = tegra_drm_connector_get_modes,
	.mode_valid = hdmi_connector_mode_valid,
	.best_encoder = tegra_drm_connector_best_encoder,
};

static const struct drm_connector_funcs hdmi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = tegra_drm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};

static const struct drm_encoder_funcs hdmi_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

/* DPMS is handled via the connector; nothing to do per-encoder. */
static void
hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	/* Empty function. */
}

/* No adjustment needed; accept the requested mode as-is. */
static bool
hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
    const struct drm_display_mode *mode,
    struct drm_display_mode *adjusted)
{
	return (true);
}

static void
hdmi_encoder_prepare(struct drm_encoder *encoder)
{
	/* Empty function. */
}

static void
hdmi_encoder_commit(struct drm_encoder *encoder)
{
	/* Empty function.
*/ } static void hdmi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; output = container_of(encoder, struct tegra_drm_encoder, encoder); sc = device_get_softc(output->dev); rv = hdmi_enable(sc); if (rv != 0) device_printf(sc->dev, "Cannot enable HDMI port\n"); } static void hdmi_encoder_disable(struct drm_encoder *encoder) { struct tegra_drm_encoder *output; struct hdmi_softc *sc; int rv; output = container_of(encoder, struct tegra_drm_encoder, encoder); sc = device_get_softc(output->dev); if (sc == NULL) return; rv = hdmi_disable(sc); if (rv != 0) device_printf(sc->dev, "Cannot disable HDMI port\n"); } static const struct drm_encoder_helper_funcs hdmi_encoder_helper_funcs = { .dpms = hdmi_encoder_dpms, .mode_fixup = hdmi_encoder_mode_fixup, .prepare = hdmi_encoder_prepare, .commit = hdmi_encoder_commit, .mode_set = hdmi_encoder_mode_set, .disable = hdmi_encoder_disable, }; /* ------------------------------------------------------------------- * * Bus and infrastructure. * */ static int hdmi_init_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct hdmi_softc *sc; phandle_t node; int rv; sc = device_get_softc(dev); node = ofw_bus_get_node(sc->dev); sc->drm = drm; sc->output.setup_clock = &hdmi_setup_clock; rv = tegra_drm_encoder_attach(&sc->output, node); if (rv != 0) { device_printf(dev, "Cannot attach output connector\n"); return(ENXIO); } /* Connect this encoder + connector to DRM. 
*/ drm_connector_init(&drm->drm_dev, &sc->output.connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(&sc->output.connector, &hdmi_connector_helper_funcs); sc->output.connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(&drm->drm_dev, &sc->output.encoder, &hdmi_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&sc->output.encoder, &hdmi_encoder_helper_funcs); drm_mode_connector_attach_encoder(&sc->output.connector, &sc->output.encoder); rv = tegra_drm_encoder_init(&sc->output, drm); if (rv < 0) { device_printf(sc->dev, "Unable to init HDMI output\n"); return (rv); } sc->output.encoder.possible_crtcs = 0x3; return (0); } static int hdmi_exit_client(device_t dev, device_t host1x, struct tegra_drm *drm) { struct hdmi_softc *sc; sc = device_get_softc(dev); tegra_drm_encoder_exit(&sc->output, drm); return (0); } static int get_fdt_resources(struct hdmi_softc *sc, phandle_t node) { int rv; rv = regulator_get_by_ofw_property(sc->dev, 0, "hdmi-supply", &sc->supply_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev,0, "pll-supply", &sc->supply_pll); if (rv != 0) { device_printf(sc->dev, "Cannot get 'pll' regulator\n"); return (ENXIO); } rv = regulator_get_by_ofw_property(sc->dev, 0, "vdd-supply", &sc->supply_vdd); if (rv != 0) { device_printf(sc->dev, "Cannot get 'vdd' regulator\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "hdmi", &sc->hwreset_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' reset\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "parent", &sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot get 'parent' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "hdmi", &sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot get 'hdmi' clock\n"); return (ENXIO); } return (0); } static int enable_fdt_resources(struct hdmi_softc *sc) { int rv; rv = 
clk_set_parent_by_clk(sc->clk_hdmi, sc->clk_parent); if (rv != 0) { device_printf(sc->dev, "Cannot set parent for 'hdmi' clock\n"); return (rv); } /* 594 MHz is arbitrarily selected value */ rv = clk_set_freq(sc->clk_parent, 594000000, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for 'hdmi' parent clock\n"); return (rv); } rv = clk_set_freq(sc->clk_hdmi, 594000000 / 4, 0); if (rv != 0) { device_printf(sc->dev, "Cannot set frequency for 'hdmi' parent clock\n"); return (rv); } rv = regulator_enable(sc->supply_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' regulator\n"); return (rv); } rv = regulator_enable(sc->supply_pll); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pll' regulator\n"); return (rv); } rv = regulator_enable(sc->supply_vdd); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'vdd' regulator\n"); return (rv); } rv = clk_enable(sc->clk_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'hdmi' clock\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_hdmi); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'hdmi' reset\n"); return (rv); } return (0); } static void hdmi_intr(void *arg) { struct hdmi_softc *sc; uint32_t status; sc = arg; /* Confirm interrupt */ status = RD4(sc, HDMI_NV_PDISP_INT_STATUS); WR4(sc, HDMI_NV_PDISP_INT_STATUS, status); /* process audio verb from HDA */ if (status & INT_CODEC_SCRATCH0) hda_intr(sc); } static int hdmi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Tegra HDMI"); return (BUS_PROBE_DEFAULT); } static int hdmi_attach(device_t dev) { struct hdmi_softc *sc; phandle_t node; int rid, rv; sc = device_get_softc(dev); sc->dev = dev; sc->output.dev = sc->dev; node = ofw_bus_get_node(sc->dev); sc->audio_src_type = SOURCE_SELECT_AUTO; sc->audio_freq = 44100; sc->audio_chans = 2; sc->hdmi_mode = false; sc->tmds_config = tegra124_tmds_config; 
sc->n_tmds_configs = nitems(tegra124_tmds_config); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Cannot allocate memory resources\n"); goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Cannot allocate IRQ resources\n"); goto fail; } rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, hdmi_intr, sc, &sc->irq_ih); if (rv != 0) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); goto fail; } rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(dev, "Cannot parse FDT resources\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(dev, "Cannot enable FDT resources\n"); goto fail; } rv = TEGRA_DRM_REGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (rv != 0) { device_printf(dev, "Cannot register DRM device\n"); goto fail; } return (bus_generic_attach(dev)); fail: TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) clk_release(sc->clk_parent); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->hwreset_hdmi != NULL) hwreset_release(sc->hwreset_hdmi); if (sc->supply_hdmi != NULL) regulator_release(sc->supply_hdmi); if (sc->supply_pll != NULL) regulator_release(sc->supply_pll); if (sc->supply_vdd != NULL) regulator_release(sc->supply_vdd); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (ENXIO); } static int hdmi_detach(device_t dev) { struct hdmi_softc *sc; sc = device_get_softc(dev); TEGRA_DRM_DEREGISTER_CLIENT(device_get_parent(sc->dev), sc->dev); if (sc->irq_ih != NULL) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->clk_parent != NULL) 
clk_release(sc->clk_parent); if (sc->clk_hdmi != NULL) clk_release(sc->clk_hdmi); if (sc->hwreset_hdmi != NULL) hwreset_release(sc->hwreset_hdmi); if (sc->supply_hdmi != NULL) regulator_release(sc->supply_hdmi); if (sc->supply_pll != NULL) regulator_release(sc->supply_pll); if (sc->supply_vdd != NULL) regulator_release(sc->supply_vdd); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (bus_generic_detach(dev)); } static device_method_t tegra_hdmi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hdmi_probe), DEVMETHOD(device_attach, hdmi_attach), DEVMETHOD(device_detach, hdmi_detach), /* tegra drm interface */ DEVMETHOD(tegra_drm_init_client, hdmi_init_client), DEVMETHOD(tegra_drm_exit_client, hdmi_exit_client), DEVMETHOD_END }; static devclass_t tegra_hdmi_devclass; DEFINE_CLASS_0(tegra_hdmi, tegra_hdmi_driver, tegra_hdmi_methods, sizeof(struct hdmi_softc)); DRIVER_MODULE(tegra_hdmi, host1x, tegra_hdmi_driver, tegra_hdmi_devclass, 0, 0); Index: head/sys/dev/bnxt/bnxt_hwrm.c =================================================================== --- head/sys/dev/bnxt/bnxt_hwrm.c (revision 359440) +++ head/sys/dev/bnxt/bnxt_hwrm.c (revision 359441) @@ -1,1812 +1,1812 @@ /*- * Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2016 Broadcom, All Rights Reserved. * The term Broadcom refers to Broadcom Limited and/or its subsidiaries * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include "bnxt.h" #include "bnxt_hwrm.h" #include "hsi_struct_def.h" static int bnxt_hwrm_err_map(uint16_t err); static inline int _is_valid_ether_addr(uint8_t *); static inline void get_random_ether_addr(uint8_t *); static void bnxt_hwrm_set_link_common(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req); static void bnxt_hwrm_set_pause_common(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req); static void bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req); static int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t); static int hwrm_send_message(struct bnxt_softc *, void *, uint32_t); static void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t); /* NVRam stuff has a five minute timeout */ #define BNXT_NVM_TIMEO (5 * 60 * 1000) static int bnxt_hwrm_err_map(uint16_t err) { int rc; switch (err) { case HWRM_ERR_CODE_SUCCESS: return 0; case HWRM_ERR_CODE_INVALID_PARAMS: case HWRM_ERR_CODE_INVALID_FLAGS: case HWRM_ERR_CODE_INVALID_ENABLES: return EINVAL; case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: return EACCES; case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: return ENOMEM; case 
HWRM_ERR_CODE_CMD_NOT_SUPPORTED: return ENOSYS; case HWRM_ERR_CODE_FAIL: return EIO; case HWRM_ERR_CODE_HWRM_ERROR: case HWRM_ERR_CODE_UNKNOWN_ERR: default: return EDOOFUS; } return rc; } int bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc) { int rc; rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp, BUS_DMA_NOWAIT); return rc; } void bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc) { if (softc->hwrm_cmd_resp.idi_vaddr) iflib_dma_free(&softc->hwrm_cmd_resp); softc->hwrm_cmd_resp.idi_vaddr = NULL; return; } static void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request, uint16_t req_type) { struct input *req = request; req->req_type = htole16(req_type); req->cmpl_ring = 0xffff; req->target_id = 0xffff; req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr); } static int _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len) { struct input *req = msg; struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; uint32_t *data = msg; int i; uint16_t cp_ring_id; uint8_t *valid; uint16_t err; uint16_t max_req_len = HWRM_MAX_REQ_LEN; struct hwrm_short_input short_input = {0}; /* TODO: DMASYNC in here. 
*/ req->seq_id = htole16(softc->hwrm_cmd_seq++); memset(resp, 0, PAGE_SIZE); cp_ring_id = le16toh(req->cmpl_ring); if (softc->flags & BNXT_FLAG_SHORT_CMD) { void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr; memcpy(short_cmd_req, req, msg_len); memset((uint8_t *) short_cmd_req + msg_len, 0, softc->hwrm_max_req_len- msg_len); short_input.req_type = req->req_type; short_input.signature = htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD); short_input.size = htole16(msg_len); short_input.req_addr = htole64(softc->hwrm_short_cmd_req_addr.idi_paddr); data = (uint32_t *)&short_input; msg_len = sizeof(short_input); /* Sync memory write before updating doorbell */ wmb(); max_req_len = BNXT_HWRM_SHORT_REQ_LEN; } /* Write request msg to hwrm channel */ for (i = 0; i < msg_len; i += 4) { bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle, i, *data); data++; } /* Clear to the end of the request buffer */ for (i = msg_len; i < max_req_len; i += 4) bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle, i, 0); /* Ring channel doorbell */ bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle, 0x100, htole32(1)); /* Check if response len is updated */ for (i = 0; i < softc->hwrm_cmd_timeo; i++) { if (resp->resp_len && resp->resp_len <= 4096) break; DELAY(1000); } if (i >= softc->hwrm_cmd_timeo) { device_printf(softc->dev, "Timeout sending %s: (timeout: %u) seq: %d\n", GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo, le16toh(req->seq_id)); return ETIMEDOUT; } /* Last byte of resp contains the valid key */ valid = (uint8_t *)resp + resp->resp_len - 1; for (i = 0; i < softc->hwrm_cmd_timeo; i++) { if (*valid == HWRM_RESP_VALID_KEY) break; DELAY(1000); } if (i >= softc->hwrm_cmd_timeo) { device_printf(softc->dev, "Timeout sending %s: " "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n", GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo, le16toh(req->req_type), le16toh(req->seq_id), msg_len, *valid); return ETIMEDOUT; } err = 
le16toh(resp->error_code); if (err) { /* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */ if (err != HWRM_ERR_CODE_FAIL) { device_printf(softc->dev, "%s command returned %s error.\n", GET_HWRM_REQ_TYPE(req->req_type), GET_HWRM_ERROR_CODE(err)); } return bnxt_hwrm_err_map(err); } return 0; } static int hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len) { int rc; BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, msg, msg_len); BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc) { struct hwrm_queue_qportcfg_input req = {0}; struct hwrm_queue_qportcfg_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc = 0; uint8_t *qptr; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto qportcfg_exit; if (!resp->max_configurable_queues) { rc = -EINVAL; goto qportcfg_exit; } softc->max_tc = resp->max_configurable_queues; if (softc->max_tc > BNXT_MAX_QUEUE) softc->max_tc = BNXT_MAX_QUEUE; qptr = &resp->queue_id0; for (int i = 0; i < softc->max_tc; i++) { softc->q_info[i].id = *qptr++; softc->q_info[i].profile = *qptr++; } qportcfg_exit: BNXT_HWRM_UNLOCK(softc); return (rc); } int bnxt_hwrm_ver_get(struct bnxt_softc *softc) { struct hwrm_ver_get_input req = {0}; struct hwrm_ver_get_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc; const char nastr[] = ""; const char naver[] = ""; uint32_t dev_caps_cfg; softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN; softc->hwrm_cmd_timeo = 1000; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; req.hwrm_intf_min = HWRM_VERSION_MINOR; req.hwrm_intf_upd = HWRM_VERSION_UPDATE; BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d", resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); softc->ver_info->hwrm_if_major = 
resp->hwrm_intf_maj; softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min; softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd; snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d", resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld); strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR, BNXT_VERSTR_SIZE); strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name, BNXT_NAME_SIZE); if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 && resp->mgmt_fw_bld == 0) { strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE); strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE); } else { snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min, resp->mgmt_fw_bld); strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name, BNXT_NAME_SIZE); } if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 && resp->netctrl_fw_bld == 0) { strlcpy(softc->ver_info->netctrl_fw_ver, naver, BNXT_VERSTR_SIZE); strlcpy(softc->ver_info->netctrl_fw_name, nastr, BNXT_NAME_SIZE); } else { snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min, resp->netctrl_fw_bld); strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name, BNXT_NAME_SIZE); } if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 && resp->roce_fw_bld == 0) { strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE); strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE); } else { snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min, resp->roce_fw_bld); strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name, BNXT_NAME_SIZE); } softc->ver_info->chip_num = le16toh(resp->chip_num); softc->ver_info->chip_rev = resp->chip_rev; softc->ver_info->chip_metal = resp->chip_metal; softc->ver_info->chip_bond_id = resp->chip_bond_id; softc->ver_info->chip_type = resp->chip_platform_type; if (resp->max_req_win_len) 
softc->hwrm_max_req_len = le16toh(resp->max_req_win_len); if (resp->def_req_timeout) softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout); dev_caps_cfg = le32toh(resp->dev_caps_cfg); if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) softc->flags |= BNXT_FLAG_SHORT_CMD; fail: BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc) { struct hwrm_func_drv_rgtr_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR); req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE); req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD); req.ver_maj = __FreeBSD_version / 100000; req.ver_min = (__FreeBSD_version / 1000) % 100; req.ver_upd = (__FreeBSD_version / 100) % 10; return hwrm_send_message(softc, &req, sizeof(req)); } int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown) { struct hwrm_func_drv_unrgtr_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR); if (shutdown == true) req.flags |= HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN; return hwrm_send_message(softc, &req, sizeof(req)); } static inline int _is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) return (FALSE); return (TRUE); } static inline void get_random_ether_addr(uint8_t *addr) { uint8_t temp[ETHER_ADDR_LEN]; arc4rand(&temp, sizeof(temp), 0); temp[0] &= 0xFE; temp[0] |= 0x02; bcopy(temp, addr, sizeof(temp)); } int bnxt_hwrm_func_qcaps(struct bnxt_softc *softc) { int rc = 0; struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; struct bnxt_func_info *func = &softc->func; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS); req.fid = htole16(0xffff); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, 
&req, sizeof(req)); if (rc) goto fail; if (resp->flags & htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED)) softc->flags |= BNXT_FLAG_WOL_CAP; func->fw_fid = le16toh(resp->fid); memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN); func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx); func->max_cp_rings = le16toh(resp->max_cmpl_rings); func->max_tx_rings = le16toh(resp->max_tx_rings); func->max_rx_rings = le16toh(resp->max_rx_rings); func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps); if (!func->max_hw_ring_grps) func->max_hw_ring_grps = func->max_tx_rings; func->max_l2_ctxs = le16toh(resp->max_l2_ctxs); func->max_vnics = le16toh(resp->max_vnics); func->max_stat_ctxs = le16toh(resp->max_stat_ctx); if (BNXT_PF(softc)) { struct bnxt_pf_info *pf = &softc->pf; pf->port_id = le16toh(resp->port_id); pf->first_vf_id = le16toh(resp->first_vf_id); pf->max_vfs = le16toh(resp->max_vfs); pf->max_encap_records = le32toh(resp->max_encap_records); pf->max_decap_records = le32toh(resp->max_decap_records); pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows); pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows); pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows); pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows); } if (!_is_valid_ether_addr(func->mac_addr)) { device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n"); get_random_ether_addr(func->mac_addr); } fail: BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_func_qcfg(struct bnxt_softc *softc) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; int rc; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG); req.fid = htole16(0xffff); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings); fn_qcfg->alloc_tx_rings = 
le16toh(resp->alloc_tx_rings); fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings); fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics); fail: BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_func_reset(struct bnxt_softc *softc) { struct hwrm_func_reset_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET); req.enables = 0; return hwrm_send_message(softc, &req, sizeof(req)); } static void bnxt_hwrm_set_link_common(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req) { uint8_t autoneg = softc->link_info.autoneg; uint16_t fw_link_speed = softc->link_info.req_link_speed; if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS; req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE); req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG); } else { req->force_link_speed = htole16(fw_link_speed); req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE); } /* tell chimp that the setting takes effect immediately */ req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY); } static void bnxt_hwrm_set_pause_common(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req) { struct bnxt_link_info *link_info = &softc->link_info; if (link_info->flow_ctrl.autoneg) { req->auto_pause = HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE; if (link_info->flow_ctrl.rx) req->auto_pause |= HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; if (link_info->flow_ctrl.tx) req->auto_pause |= HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE); } else { if (link_info->flow_ctrl.rx) req->force_pause |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; if (link_info->flow_ctrl.tx) req->force_pause |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE); } } /* JFV this needs interface connection */ static void bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req) { /* 
struct ethtool_eee *eee = &softc->eee; */ bool eee_enabled = false; if (eee_enabled) { #if 0 uint16_t eee_speeds; uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE; if (eee->tx_lpi_enabled) flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI; req->flags |= htole32(flags); eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); req->eee_link_speed_mask = htole16(eee_speeds); req->tx_lpi_timer = htole32(eee->tx_lpi_timer); #endif } else { req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE); } } int bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause, bool set_eee, bool set_link) { struct hwrm_port_phy_cfg_input req = {0}; int rc; if (softc->flags & BNXT_FLAG_NPAR) return ENOTSUP; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG); if (set_pause) { bnxt_hwrm_set_pause_common(softc, &req); if (softc->link_info.flow_ctrl.autoneg) set_link = true; } if (set_link) bnxt_hwrm_set_link_common(softc, &req); if (set_eee) bnxt_hwrm_set_eee(softc, &req); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (!rc) { if (set_pause) { /* since changing of 'force pause' setting doesn't * trigger any link change event, the driver needs to * update the current pause result upon successful * return of the phy_cfg command */ if (!softc->link_info.flow_ctrl.autoneg) bnxt_report_link(softc); } } BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic) { struct hwrm_vnic_cfg_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG); if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT) req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT); if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL) req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE); if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP) req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE); req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP | HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
HWRM_VNIC_CFG_INPUT_ENABLES_MRU); req.vnic_id = htole16(vnic->id); req.dflt_ring_grp = htole16(vnic->def_ring_grp); req.rss_rule = htole16(vnic->rss_id); req.cos_rule = htole16(vnic->cos_rule); req.lb_rule = htole16(vnic->lb_rule); req.mru = htole16(vnic->mru); return hwrm_send_message(softc, &req, sizeof(req)); } int bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic) { struct hwrm_vnic_alloc_input req = {0}; struct hwrm_vnic_alloc_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc; if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) { device_printf(softc->dev, "Attempt to re-allocate vnic %04x\n", vnic->id); return EDOOFUS; } bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC); if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT) req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; vnic->id = le32toh(resp->vnic_id); fail: BNXT_HWRM_UNLOCK(softc); return (rc); } int bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id) { struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc; if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) { device_printf(softc->dev, "Attempt to re-allocate vnic ctx %04x\n", *ctx_id); return EDOOFUS; } bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; *ctx_id = le32toh(resp->rss_cos_lb_ctx_id); fail: BNXT_HWRM_UNLOCK(softc); return (rc); } int bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp) { struct hwrm_ring_grp_alloc_input req = {0}; struct hwrm_ring_grp_alloc_output *resp; int rc = 0; if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) { device_printf(softc->dev, "Attempt to re-allocate ring group %04x\n", grp->grp_id); return EDOOFUS; } resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; 
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC); req.cr = htole16(grp->cp_ring_id); req.rr = htole16(grp->rx_ring_id); req.ar = htole16(grp->ag_ring_id); req.sc = htole16(grp->stats_ctx); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; grp->grp_id = le32toh(resp->ring_group_id); fail: BNXT_HWRM_UNLOCK(softc); return rc; } /* * Ring allocation message to the firmware */ int bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type, struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id, bool irq) { struct hwrm_ring_alloc_input req = {0}; struct hwrm_ring_alloc_output *resp; int rc; if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) { device_printf(softc->dev, "Attempt to re-allocate ring %04x\n", ring->phys_id); return EDOOFUS; } resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC); req.enables = htole32(0); req.fbo = htole32(0); if (stat_ctx_id != HWRM_NA_SIGNATURE) { req.enables |= htole32( HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID); req.stat_ctx_id = htole32(stat_ctx_id); } req.ring_type = type; req.page_tbl_addr = htole64(ring->paddr); req.length = htole32(ring->ring_size); req.logical_id = htole16(ring->id); req.cmpl_ring_id = htole16(cmpl_ring_id); req.queue_id = htole16(softc->q_info[0].id); #if 0 /* MODE_POLL appears to crash the firmware */ if (irq) req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; else req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_POLL; #else req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; #endif BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; ring->phys_id = le16toh(resp->ring_id); fail: BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr, uint64_t paddr) { struct hwrm_stat_ctx_alloc_input req = {0}; struct hwrm_stat_ctx_alloc_output *resp; int rc = 0; if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) { 
device_printf(softc->dev, "Attempt to re-allocate stats ctx %08x\n", cpr->stats_ctx_id); return EDOOFUS; } resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC); req.update_period_ms = htole32(1000); req.stats_dma_addr = htole64(paddr); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; cpr->stats_ctx_id = le32toh(resp->stat_ctx_id); fail: BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_port_qstats(struct bnxt_softc *softc) { struct hwrm_port_qstats_input req = {0}; int rc = 0; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS); req.port_id = htole16(softc->pf.port_id); req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr); req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic) { struct hwrm_cfa_l2_set_rx_mask_input req = {0}; struct bnxt_vlan_tag *tag; uint32_t *tags; - uint32_t num_vlan_tags = 0;; + uint32_t num_vlan_tags = 0; uint32_t i; uint32_t mask = vnic->rx_mask; int rc; SLIST_FOREACH(tag, &vnic->vlan_tags, next) num_vlan_tags++; if (num_vlan_tags) { if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN)) { if (!vnic->vlan_only) mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN; else mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; } if (vnic->vlan_tag_list.idi_vaddr) { iflib_dma_free(&vnic->vlan_tag_list); vnic->vlan_tag_list.idi_vaddr = NULL; } rc = iflib_dma_alloc(softc->ctx, 4 * num_vlan_tags, &vnic->vlan_tag_list, BUS_DMA_NOWAIT); if (rc) return rc; tags = (uint32_t *)vnic->vlan_tag_list.idi_vaddr; i = 0; SLIST_FOREACH(tag, &vnic->vlan_tags, next) { tags[i] = htole32((tag->tpid << 16) | tag->tag); i++; } } bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK); req.vnic_id = htole32(vnic->id); req.mask = 
htole32(mask); req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr); req.num_mc_entries = htole32(vnic->mc_list_count); req.vlan_tag_tbl_addr = htole64(vnic->vlan_tag_list.idi_paddr); req.num_vlan_tags = htole32(num_vlan_tags); return hwrm_send_message(softc, &req, sizeof(req)); } int bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic) { struct hwrm_cfa_l2_filter_alloc_input req = {0}; struct hwrm_cfa_l2_filter_alloc_output *resp; uint32_t enables = 0; int rc = 0; if (vnic->filter_id != -1) { device_printf(softc->dev, "Attempt to re-allocate l2 ctx filter\n"); return EDOOFUS; } resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC); req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX); enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID; req.enables = htole32(enables); req.dst_id = htole16(vnic->id); memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)), ETHER_ADDR_LEN); memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask)); BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto fail; vnic->filter_id = le64toh(resp->l2_filter_id); vnic->flow_id = le64toh(resp->flow_id); fail: BNXT_HWRM_UNLOCK(softc); return (rc); } int bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic, uint32_t hash_type) { struct hwrm_vnic_rss_cfg_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG); req.hash_type = htole32(hash_type); req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr); req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr); req.rss_ctx_idx = htole16(vnic->rss_id); return hwrm_send_message(softc, &req, sizeof(req)); } int bnxt_cfg_async_cr(struct bnxt_softc *softc) { int rc = 0; if (BNXT_PF(softc)) { struct hwrm_func_cfg_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, 
HWRM_FUNC_CFG); req.fid = htole16(0xffff); req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id); rc = hwrm_send_message(softc, &req, sizeof(req)); } else { struct hwrm_func_vf_cfg_input req = {0}; bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG); req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id); rc = hwrm_send_message(softc, &req, sizeof(req)); } return rc; } void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc) { softc->hw_lro.enable = min(softc->hw_lro.enable, 1); softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1); softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs, HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX); softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs, HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX); softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU); } int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc) { struct hwrm_vnic_tpa_cfg_input req = {0}; uint32_t flags; if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) { return 0; } bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG); if (softc->hw_lro.enable) { flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ; if (softc->hw_lro.is_mode_gro) flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO; else flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE; req.flags = htole32(flags); req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS | HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS | HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN); req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs); req.max_aggs = htole16(softc->hw_lro.max_aggs); req.min_agg_len = htole32(softc->hw_lro.min_agg_len); } req.vnic_id = htole16(softc->vnic_info.id); return hwrm_send_message(softc, &req, 
sizeof(req)); } int bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type, uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index, uint8_t search_opt, uint32_t *data_length, uint32_t *item_length, uint32_t *fw_ver) { struct hwrm_nvm_find_dir_entry_input req = {0}; struct hwrm_nvm_find_dir_entry_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc = 0; uint32_t old_timeo; MPASS(ordinal); bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY); if (use_index) { req.enables = htole32( HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID); req.dir_idx = htole16(*index); } req.dir_type = htole16(type); req.dir_ordinal = htole16(*ordinal); req.dir_ext = htole16(ext); req.opt_ordinal = search_opt; BNXT_HWRM_LOCK(softc); old_timeo = softc->hwrm_cmd_timeo; softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO; rc = _hwrm_send_message(softc, &req, sizeof(req)); softc->hwrm_cmd_timeo = old_timeo; if (rc) goto exit; if (item_length) *item_length = le32toh(resp->dir_item_length); if (data_length) *data_length = le32toh(resp->dir_data_length); if (fw_ver) *fw_ver = le32toh(resp->fw_ver); *ordinal = le16toh(resp->dir_ordinal); if (index) *index = le16toh(resp->dir_idx); exit: BNXT_HWRM_UNLOCK(softc); return (rc); } int bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset, uint32_t length, struct iflib_dma_info *data) { struct hwrm_nvm_read_input req = {0}; int rc; uint32_t old_timeo; if (length > data->idi_size) { rc = EINVAL; goto exit; } bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ); req.host_dest_addr = htole64(data->idi_paddr); req.dir_idx = htole16(index); req.offset = htole32(offset); req.len = htole32(length); BNXT_HWRM_LOCK(softc); old_timeo = softc->hwrm_cmd_timeo; softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO; rc = _hwrm_send_message(softc, &req, sizeof(req)); softc->hwrm_cmd_timeo = old_timeo; BNXT_HWRM_UNLOCK(softc); if (rc) goto exit; bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD); goto exit; exit: 
return rc; } int bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset, void *data, bool cpyin, uint32_t length) { struct hwrm_nvm_modify_input req = {0}; struct iflib_dma_info dma_data; int rc; uint32_t old_timeo; if (length == 0 || !data) return EINVAL; rc = iflib_dma_alloc(softc->ctx, length, &dma_data, BUS_DMA_NOWAIT); if (rc) return ENOMEM; if (cpyin) { rc = copyin(data, dma_data.idi_vaddr, length); if (rc) goto exit; } else memcpy(dma_data.idi_vaddr, data, length); bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY); req.host_src_addr = htole64(dma_data.idi_paddr); req.dir_idx = htole16(index); req.offset = htole32(offset); req.len = htole32(length); BNXT_HWRM_LOCK(softc); old_timeo = softc->hwrm_cmd_timeo; softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO; rc = _hwrm_send_message(softc, &req, sizeof(req)); softc->hwrm_cmd_timeo = old_timeo; BNXT_HWRM_UNLOCK(softc); exit: iflib_dma_free(&dma_data); return rc; } int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor, uint8_t *selfreset) { struct hwrm_fw_reset_input req = {0}; struct hwrm_fw_reset_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc; MPASS(selfreset); bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET); req.embedded_proc_type = processor; req.selfrst_status = *selfreset; BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto exit; *selfreset = resp->selfrst_status; exit: BNXT_HWRM_UNLOCK(softc); return rc; } int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset) { struct hwrm_fw_qstatus_input req = {0}; struct hwrm_fw_qstatus_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr; int rc; MPASS(selfreset); bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS); req.embedded_proc_type = type; BNXT_HWRM_LOCK(softc); rc = _hwrm_send_message(softc, &req, sizeof(req)); if (rc) goto exit; *selfreset = 
/*
 * NOTE(review): tail of a function whose beginning lies before this chunk
 * of the file; tokens preserved exactly as found.
 */
resp->selfrst_status;
exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Write one item into device NVRAM via the HWRM_NVM_WRITE firmware command.
 *
 * data/cpyin: source payload; when cpyin is true, 'data' is a userland
 *	pointer fetched with copyin(), otherwise it is copied with memcpy().
 * type/ordinal/ext/attr/option: NVM directory coordinates, passed through
 *	to firmware in little-endian form.
 * keep: ask firmware to keep the original active image
 *	(KEEP_ORIG_ACTIVE_IMG flag).
 * item_length: in/out — requested item size in, firmware-reported size out.
 * index: out — directory index chosen by firmware (may be NULL).
 *
 * Returns 0 on success or an errno; the bounce DMA buffer is always freed
 * before returning.
 */
int
bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
    uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
    uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
    uint16_t *index)
{
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	struct iflib_dma_info dma_data;
	int rc;
	uint32_t old_timeo;

	if (data_length) {
		/* Stage the payload in a DMA-able bounce buffer. */
		rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
		    BUS_DMA_NOWAIT);
		if (rc)
			return ENOMEM;
		if (cpyin) {
			rc = copyin(data, dma_data.idi_vaddr, data_length);
			if (rc)
				/* HWRM lock not yet taken: skip the unlock. */
				goto early_exit;
		} else
			memcpy(dma_data.idi_vaddr, data, data_length);
		bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else
		dma_data.idi_paddr = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);

	req.host_src_addr = htole64(dma_data.idi_paddr);
	req.dir_type = htole16(type);
	req.dir_ordinal = htole16(ordinal);
	req.dir_ext = htole16(ext);
	req.dir_attr = htole16(attr);
	req.dir_data_length = htole32(data_length);
	req.option = htole16(option);
	if (keep) {
		req.flags =
		    htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
	}
	if (item_length)
		req.dir_item_length = htole32(*item_length);

	BNXT_HWRM_LOCK(softc);
	/* NVM operations are slow; temporarily raise the HWRM timeout. */
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	if (rc)
		goto exit;

	if (item_length)
		*item_length = le32toh(resp->dir_item_length);
	if (index)
		*index = le16toh(resp->dir_idx);

exit:
	BNXT_HWRM_UNLOCK(softc);
early_exit:
	if (data_length)
		iflib_dma_free(&dma_data);
	return rc;
}

/*
 * Erase the NVM directory entry at 'index' (HWRM_NVM_ERASE_DIR_ENTRY),
 * using the longer NVM timeout for the duration of the command.
 */
int
bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
{
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	uint32_t old_timeo;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
	req.dir_idx = htole16(index);
	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Query the NVM directory geometry (HWRM_NVM_GET_DIR_INFO): number of
 * directory entries and the size of each entry.  Either out-pointer may
 * be NULL.
 */
int
bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
    uint32_t *entry_length)
{
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;
	uint32_t old_timeo;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);

	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	if (rc)
		goto exit;

	if (entries)
		*entries = le32toh(resp->entries);
	if (entry_length)
		*entry_length = le32toh(resp->entry_length);

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Fetch the whole NVM directory into caller-supplied DMA memory
 * (HWRM_NVM_GET_DIR_ENTRIES).  The directory size is queried first and
 * validated against dma_data->idi_size; entries/entry_length are filled
 * in if non-NULL (local scratch is substituted otherwise).
 */
int
bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
    uint32_t *entry_length, struct iflib_dma_info *dma_data)
{
	struct hwrm_nvm_get_dir_entries_input req = {0};
	uint32_t ent;
	uint32_t ent_len;
	int rc;
	uint32_t old_timeo;

	if (!entries)
		entries = &ent;
	if (!entry_length)
		entry_length = &ent_len;

	rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
	if (rc)
		goto exit;
	if (*entries * *entry_length > dma_data->idi_size) {
		rc = EINVAL;
		goto exit;
	}

	/*
	 * TODO: There's a race condition here that could blow up DMA memory...
	 *	 we need to allocate the max size, not the currently in use
	 *	 size.  The command should totally have a max size here.
	 */
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
	req.host_dest_addr = htole64(dma_data->idi_paddr);
	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	BNXT_HWRM_UNLOCK(softc);
	if (rc)
		goto exit;
	bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
	    BUS_DMASYNC_POSTWRITE);

exit:
	return rc;
}

/*
 * Query NVRAM device properties (HWRM_NVM_GET_DEV_INFO).  Every
 * out-pointer is optional; only non-NULL ones are filled in.
 */
int
bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
    uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
    uint32_t *reserved_size, uint32_t *available_size)
{
	struct hwrm_nvm_get_dev_info_input req = {0};
	struct hwrm_nvm_get_dev_info_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;
	uint32_t old_timeo;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);

	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	if (rc)
		goto exit;

	if (mfg_id)
		*mfg_id = le16toh(resp->manufacturer_id);
	if (device_id)
		*device_id = le16toh(resp->device_id);
	if (sector_size)
		*sector_size = le32toh(resp->sector_size);
	if (nvram_size)
		*nvram_size = le32toh(resp->nvram_size);
	if (reserved_size)
		*reserved_size = le32toh(resp->reserved_size);
	if (available_size)
		*available_size = le32toh(resp->available_size);

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Kick off a firmware package install (HWRM_NVM_INSTALL_UPDATE) and
 * report the firmware's result/problem-item/reset-required status.
 *
 * NOTE(review): installed_items is a uint64_t * but is filled with
 * le32toh(resp->installed_items); if the HWRM response field is 64-bit
 * this truncates — verify against the HWRM spec for this firmware rev.
 */
int
bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
    uint32_t install_type, uint64_t *installed_items, uint8_t *result,
    uint8_t *problem_item, uint8_t *reset_required)
{
	struct hwrm_nvm_install_update_input req = {0};
	struct hwrm_nvm_install_update_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;
	uint32_t old_timeo;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
	req.install_type = htole32(install_type);

	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	if (rc)
		goto exit;

	if (installed_items)
		*installed_items = le32toh(resp->installed_items);
	if (result)
		*result = resp->result;
	if (problem_item)
		*problem_item = resp->problem_item;
	if (reset_required)
		*reset_required = resp->reset_required;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Ask firmware to validate an updated NVM item (HWRM_NVM_VERIFY_UPDATE)
 * identified by directory type/ordinal/ext.
 */
int
bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
    uint16_t ordinal, uint16_t ext)
{
	struct hwrm_nvm_verify_update_input req = {0};
	uint32_t old_timeo;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);

	req.dir_type = htole16(type);
	req.dir_ordinal = htole16(ordinal);
	req.dir_ext = htole16(ext);

	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->hwrm_cmd_timeo;
	softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->hwrm_cmd_timeo = old_timeo;
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Read the firmware's wall-clock time (HWRM_FW_GET_TIME).  All
 * out-pointers are optional.  Uses the normal HWRM timeout.
 */
int
bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
    uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
    uint8_t *second, uint16_t *millisecond, uint16_t *zone)
{
	struct hwrm_fw_get_time_input req = {0};
	struct hwrm_fw_get_time_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;

	if (year)
		*year = le16toh(resp->year);
	if (month)
		*month = resp->month;
	if (day)
		*day = resp->day;
	if (hour)
		*hour = resp->hour;
	if (minute)
		*minute = resp->minute;
	if (second)
		*second = resp->second;
	if (millisecond)
		*millisecond = le16toh(resp->millisecond);
	if (zone)
		*zone = le16toh(resp->zone);

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Set the firmware's wall-clock time (HWRM_FW_SET_TIME).  Delegates to
 * hwrm_send_message (the locking variant of the send path).
 */
int
bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
    uint8_t month, uint8_t day, uint8_t hour, uint8_t minute,
    uint8_t second, uint16_t millisecond, uint16_t zone)
{
	struct hwrm_fw_set_time_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);

	req.year = htole16(year);
	req.month = month;
	req.day = day;
	req.hour = hour;
	req.minute = minute;
	req.second = second;
	req.millisecond = htole16(millisecond);
	req.zone = htole16(zone);
	return hwrm_send_message(softc, &req, sizeof(req));
}

/*
 * Query the PHY's current configuration/state (HWRM_PORT_PHY_QCFG) and
 * cache it in softc->link_info: link status, duplex, autoneg, pause
 * (flow-control) resolution, speeds, PHY identity strings, media and
 * transceiver type.
 */
int
bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
{
	struct bnxt_link_info *link_info = &softc->link_info;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc = 0;

	BNXT_HWRM_LOCK(softc);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);

	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;

	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	link_info->auto_mode = resp->auto_mode;

	/*
	 * When AUTO_PAUSE_AUTONEG_PAUSE bit is set to 1,
	 * the advertisement of pause is enabled.
	 * 1. When the auto_mode is not set to none and this flag is set to 1,
	 *    then the auto_pause bits on this port are being advertised and
	 *    autoneg pause results are being interpreted.
	 * 2. When the auto_mode is not set to none and this flag is set to 0,
	 *    the pause is forced as indicated in force_pause, and also
	 *    advertised as auto_pause bits, but the autoneg results are not
	 *    interpreted since the pause configuration is being forced.
	 * 3. When the auto_mode is set to none and this flag is set to 1,
	 *    auto_pause bits should be ignored and should be set to 0.
	 */

	link_info->flow_ctrl.autoneg = false;
	link_info->flow_ctrl.tx = false;
	link_info->flow_ctrl.rx = false;

	if ((resp->auto_mode) &&
	    (resp->auto_pause & BNXT_AUTO_PAUSE_AUTONEG_PAUSE)) {
		link_info->flow_ctrl.autoneg = true;
	}

	if (link_info->flow_ctrl.autoneg) {
		if (resp->auto_pause & BNXT_PAUSE_TX)
			link_info->flow_ctrl.tx = true;
		if (resp->auto_pause & BNXT_PAUSE_RX)
			link_info->flow_ctrl.rx = true;
	} else {
		if (resp->force_pause & BNXT_PAUSE_TX)
			link_info->flow_ctrl.tx = true;
		if (resp->force_pause & BNXT_PAUSE_RX)
			link_info->flow_ctrl.rx = true;
	}

	link_info->duplex_setting = resp->duplex_cfg;
	/* Report a speed only while the link is actually up. */
	if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
		link_info->link_speed = le16toh(resp->link_speed);
	else
		link_info->link_speed = 0;
	link_info->force_link_speed = le16toh(resp->force_link_speed);
	link_info->auto_link_speed = le16toh(resp->auto_link_speed);
	link_info->support_speeds = le16toh(resp->support_speeds);
	link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
	link_info->preemphasis = le32toh(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
	    "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
	    link_info->phy_ver[2]);
	strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
	    BNXT_NAME_SIZE);
	strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
	    BNXT_NAME_SIZE);
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
	    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

/*
 * Walk the WOL filter list one entry at a time (HWRM_WOL_FILTER_QCFG).
 * Returns the next handle (0 terminates the walk); as a side effect,
 * records a discovered magic-packet filter in softc->wol /
 * softc->wol_filter_id.
 */
uint16_t
bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
{
	struct hwrm_wol_filter_qcfg_input req = {0};
	struct hwrm_wol_filter_qcfg_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	uint16_t next_handle = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
	req.port_id = htole16(softc->pf.port_id);
	req.handle = htole16(handle);
	rc = hwrm_send_message(softc, &req, sizeof(req));
	if (!rc) {
		next_handle = le16toh(resp->next_handle);
		if (next_handle != 0) {
			if (resp->wol_type ==
			    HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
				softc->wol = 1;
				softc->wol_filter_id = resp->wol_filter_id;
			}
		}
	}
	return next_handle;
}

/*
 * Allocate a magic-packet WOL filter for this port's MAC address
 * (HWRM_WOL_FILTER_ALLOC); the returned filter id is cached in softc.
 */
int
bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
{
	struct hwrm_wol_filter_alloc_input req = {0};
	struct hwrm_wol_filter_alloc_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
	req.port_id = htole16(softc->pf.port_id);
	req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
	req.enables =
	    htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
	memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
	rc = hwrm_send_message(softc, &req, sizeof(req));
	if (!rc)
		softc->wol_filter_id = resp->wol_filter_id;

	return rc;
}

/*
 * Free the WOL filter previously cached in softc->wol_filter_id
 * (HWRM_WOL_FILTER_FREE).
 */
int
bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
{
	struct hwrm_wol_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
	req.port_id = htole16(softc->pf.port_id);
	req.enables =
	    htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
	req.wol_filter_id = softc->wol_filter_id;
	return hwrm_send_message(softc, &req, sizeof(req));
}

/*
 * Fill one interrupt-coalescing request from packed parameters:
 * max_frames and buf_tmrs each carry the "during interrupt" value in
 * their high 16 bits and the normal value in the low 16 bits.
 */
static void
bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
    uint32_t buf_tmrs, uint16_t flags,
    struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	req->flags = htole16(flags);
	req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
	req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
	req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
	req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
	req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
	req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
	req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
}

/*
 * Program interrupt coalescing for every completion ring from the
 * rx_coal_*/tx_coal_* tunables in softc.  Only the RX parameters are
 * currently sent (see the TBD in the loop); the TX request is built but
 * unused.
 */
int
bnxt_hwrm_set_coal(struct bnxt_softc *softc)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
	    req_tx = {0}, *req;
	uint16_t max_buf, max_buf_irq;
	uint16_t buf_tmr, buf_tmr_irq;
	uint32_t flags;

	bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(uint16_t, max_buf, 1, 63);
	max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency. Enable it only
	 * if coal_usecs is less than 25 us.
	 */
	if (softc->rx_coal_usecs < 25)
		flags |=
		    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
	    buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
	max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
	buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
	    buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

	for (i = 0; i < softc->nrxqsets; i++) {
		req = &req_rx;
		/*
		 * TBD:
		 *      Check if Tx also needs to be done
		 *      So far, Tx processing has been done in softirq contest
		 *
		 * req = &req_tx;
		 */
		req->ring_id = htole16(softc->grp_info[i].cp_ring_id);

		rc = hwrm_send_message(softc, req, sizeof(*req));
		if (rc)
			break;
	}
	return rc;
}

/*
 * Register the driver with firmware for async event forwarding
 * (HWRM_FUNC_DRV_RGTR).  A fixed set of events is always requested;
 * callers may OR in extra event ids via the optional bmap/bmap_size
 * bitmap.  The 256-bit scratch bitmap is repacked into the request's
 * 32-bit async_event_fwd words.
 */
int
bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc,
    unsigned long *bmap, int bmap_size)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	bitstr_t *async_events_bmap;
	uint32_t *events;
	int i;

#define AE_BMAP_SZ_BITS 256
	async_events_bmap = bit_alloc(AE_BMAP_SZ_BITS, M_DEVBUF, M_WAITOK);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);

	req.enables =
	    htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);

	bit_set(async_events_bmap,
	    HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	bit_set(async_events_bmap,
	    HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
	bit_set(async_events_bmap,
	    HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED);
	bit_set(async_events_bmap,
	    HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE);
	bit_set(async_events_bmap,
	    HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);

	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (bit_test(bmap, i))
				bit_set(async_events_bmap, i);
		}
	}

#define AE_BMAP_SZ_WORDS (AE_BMAP_SZ_BITS / 8 / sizeof(uint32_t))
	events = (uint32_t *)async_events_bmap;
	for (i = 0; i < AE_BMAP_SZ_WORDS; i++)
		req.async_event_fwd[i] |= htole32(events[i]);
#undef AE_BMAP_SZ_WORDS
#undef AE_BMAP_SZ_BITS

	free(async_events_bmap, M_DEVBUF);

	return hwrm_send_message(softc, &req, sizeof(req));
}

/*
 * NOTE(review): everything below is residue of a concatenated VCS diff —
 * the header and license of head/sys/dev/bnxt/bnxt_sysctl.c; preserved
 * exactly as found.
 */
Index: head/sys/dev/bnxt/bnxt_sysctl.c
===================================================================
--- head/sys/dev/bnxt/bnxt_sysctl.c (revision 359440)
+++ head/sys/dev/bnxt/bnxt_sysctl.c (revision 359441)
@@ -1,1420 +1,1420 @@
/*-
 * Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom, All Rights Reserved.
 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTE(review): the angle-bracket header names of the following bare
 * #include directives were stripped by text extraction; tokens are
 * preserved exactly as found.
 */
#include
__FBSDID("$FreeBSD$");

#include
#include

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sysctl.h"

static int bnxt_vlan_only_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * We want to create:
 * dev.bnxt.0.hwstats.txq0
 * dev.bnxt.0.hwstats.txq0.txmbufs
 * dev.bnxt.0.hwstats.rxq0
 * dev.bnxt.0.hwstats.txq0.rxmbufs
 * so the hwstats ctx list needs to be created in attach_post and populated
 * during init.
 *
 * Then, it needs to be cleaned up in stop.
 */

/*
 * Create the top-level sysctl contexts and nodes this driver hangs its
 * statistics under: hwstats, ver, nvram (PF only), hw_lro and fc.
 * Each node gets its own sysctl context so it can be torn down
 * independently in bnxt_free_sysctl_ctx().
 *
 * Returns 0 on success or ENOMEM when a node cannot be created; the
 * context that failed is freed before returning.
 */
int
bnxt_init_sysctl_ctx(struct bnxt_softc *softc)
{
	struct sysctl_ctx_list *ctx;

	sysctl_ctx_init(&softc->hw_stats);
	ctx = device_get_sysctl_ctx(softc->dev);
	softc->hw_stats_oid = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)), OID_AUTO,
	    "hwstats", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	    "hardware statistics");
	if (!softc->hw_stats_oid) {
		sysctl_ctx_free(&softc->hw_stats);
		return ENOMEM;
	}

	sysctl_ctx_init(&softc->ver_info->ver_ctx);
	ctx = device_get_sysctl_ctx(softc->dev);
	softc->ver_info->ver_oid = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)), OID_AUTO,
	    "ver", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	    "hardware/firmware version information");
	if (!softc->ver_info->ver_oid) {
		sysctl_ctx_free(&softc->ver_info->ver_ctx);
		return ENOMEM;
	}

	/* The nvram node only exists on the physical function. */
	if (BNXT_PF(softc)) {
		sysctl_ctx_init(&softc->nvm_info->nvm_ctx);
		ctx = device_get_sysctl_ctx(softc->dev);
		softc->nvm_info->nvm_oid = SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)),
		    OID_AUTO, "nvram", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
		    "nvram information");
		if (!softc->nvm_info->nvm_oid) {
			sysctl_ctx_free(&softc->nvm_info->nvm_ctx);
			return ENOMEM;
		}
	}

	sysctl_ctx_init(&softc->hw_lro_ctx);
	ctx = device_get_sysctl_ctx(softc->dev);
	softc->hw_lro_oid = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)), OID_AUTO,
	    "hw_lro", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	    "hardware lro");
	if (!softc->hw_lro_oid) {
		sysctl_ctx_free(&softc->hw_lro_ctx);
		return ENOMEM;
	}

	sysctl_ctx_init(&softc->flow_ctrl_ctx);
	ctx = device_get_sysctl_ctx(softc->dev);
	softc->flow_ctrl_oid = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)), OID_AUTO,
	    "fc", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	    "flow ctrl");
	if (!softc->flow_ctrl_oid) {
		sysctl_ctx_free(&softc->flow_ctrl_ctx);
		return ENOMEM;
	}

	return 0;
}

/*
 * Tear down every sysctl context created in bnxt_init_sysctl_ctx().
 * All contexts are attempted even if an earlier free fails; the last
 * non-zero error is returned, and an oid pointer is only cleared when
 * its context freed successfully.
 */
int
bnxt_free_sysctl_ctx(struct bnxt_softc *softc)
{
	int orc;
	int rc = 0;

	if (softc->hw_stats_oid != NULL) {
		orc = sysctl_ctx_free(&softc->hw_stats);
		if (orc)
			rc = orc;
		else
			softc->hw_stats_oid = NULL;
	}
	if (softc->ver_info->ver_oid != NULL) {
		orc = sysctl_ctx_free(&softc->ver_info->ver_ctx);
		if (orc)
			rc = orc;
		else
			softc->ver_info->ver_oid = NULL;
	}
	if (BNXT_PF(softc) && softc->nvm_info->nvm_oid != NULL) {
		orc = sysctl_ctx_free(&softc->nvm_info->nvm_ctx);
		if (orc)
			rc = orc;
		else
			softc->nvm_info->nvm_oid = NULL;
	}
	if (softc->hw_lro_oid != NULL) {
		orc = sysctl_ctx_free(&softc->hw_lro_ctx);
		if (orc)
			rc = orc;
		else
			softc->hw_lro_oid = NULL;
	}

	if (softc->flow_ctrl_oid != NULL) {
		orc = sysctl_ctx_free(&softc->flow_ctrl_ctx);
		if (orc)
			rc = orc;
		else
			softc->flow_ctrl_oid = NULL;
	}

	return rc;
}

/*
 * Create the dev.bnxt.N.hwstats.txqM node and its per-queue transmit
 * counters, backed directly by the firmware stats DMA block
 * (softc->tx_stats) indexed by queue number.
 */
int
bnxt_create_tx_sysctls(struct bnxt_softc *softc, int txr)
{
	struct sysctl_oid *oid;
	struct ctx_hw_stats *tx_stats = (void *)softc->tx_stats.idi_vaddr;
	char name[32];
	char desc[64];

	sprintf(name, "txq%d", txr);
	sprintf(desc, "transmit queue %d", txr);

	oid = SYSCTL_ADD_NODE(&softc->hw_stats,
	    SYSCTL_CHILDREN(softc->hw_stats_oid), OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, desc);
	if (!oid)
		return ENOMEM;

	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "ucast_pkts", CTLFLAG_RD, &tx_stats[txr].tx_ucast_pkts,
	    "unicast packets sent");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "mcast_pkts", CTLFLAG_RD, &tx_stats[txr].tx_mcast_pkts,
	    "multicast packets sent");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "bcast_pkts", CTLFLAG_RD, &tx_stats[txr].tx_bcast_pkts,
	    "broadcast packets sent");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "discard_pkts", CTLFLAG_RD, &tx_stats[txr].tx_discard_pkts,
	    "discarded transmit packets");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "drop_pkts", CTLFLAG_RD, &tx_stats[txr].tx_drop_pkts,
	    "dropped transmit packets");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "ucast_bytes", CTLFLAG_RD, &tx_stats[txr].tx_ucast_bytes,
	    "unicast bytes sent");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "mcast_bytes", CTLFLAG_RD, &tx_stats[txr].tx_mcast_bytes,
	    "multicast bytes sent");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "bcast_bytes", CTLFLAG_RD, &tx_stats[txr].tx_bcast_bytes,
	    "broadcast bytes sent");

	return 0;
}

/*
 * NOTE(review): bnxt_create_port_stats_sysctls() continues past the end
 * of this chunk; the visible head is preserved exactly as found.
 */
int
bnxt_create_port_stats_sysctls(struct bnxt_softc *softc)
{
	struct sysctl_oid *oid;
	char name[32];
	char desc[64];

	sprintf(name, "port_stats");
	sprintf(desc, "Port Stats");

	oid = SYSCTL_ADD_NODE(&softc->hw_stats,
	    SYSCTL_CHILDREN(softc->hw_stats_oid), OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, desc);
	if (!oid)
		return ENOMEM;

	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "tx_64b_frames", CTLFLAG_RD,
	    &softc->tx_port_stats->tx_64b_frames, "Transmitted 64b frames");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "tx_65b_127b_frames", CTLFLAG_RD,
	    &softc->tx_port_stats->tx_65b_127b_frames,
	    "Transmitted 65b 127b frames");
	SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "tx_128b_255b_frames", CTLFLAG_RD,
&softc->tx_port_stats->tx_128b_255b_frames, "Transmitted 128b 255b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_256b_511b_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_256b_511b_frames, "Transmitted 256b 511b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_512b_1023b_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_512b_1023b_frames, "Transmitted 512b 1023b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_1024b_1518_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_1024b_1518_frames, "Transmitted 1024b 1518 frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_good_vlan_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_good_vlan_frames, "Transmitted good vlan frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_1519b_2047_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_1519b_2047_frames, "Transmitted 1519b 2047 frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_2048b_4095b_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_2048b_4095b_frames, "Transmitted 2048b 4095b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_4096b_9216b_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_4096b_9216b_frames, "Transmitted 4096b 9216b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_9217b_16383b_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_9217b_16383b_frames, "Transmitted 9217b 16383b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_good_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_good_frames, "Transmitted good frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_total_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_total_frames, "Transmitted total frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_ucast_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_ucast_frames, 
"Transmitted ucast frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_mcast_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_mcast_frames, "Transmitted mcast frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_bcast_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_bcast_frames, "Transmitted bcast frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pause_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_pause_frames, "Transmitted pause frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_frames, "Transmitted pfc frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_jabber_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_jabber_frames, "Transmitted jabber frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_fcs_err_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_fcs_err_frames, "Transmitted fcs err frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_control_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_control_frames, "Transmitted control frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_oversz_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_oversz_frames, "Transmitted oversz frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_single_dfrl_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_single_dfrl_frames, "Transmitted single dfrl frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_multi_dfrl_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_multi_dfrl_frames, "Transmitted multi dfrl frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_single_coll_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_single_coll_frames, "Transmitted single coll frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, 
"tx_multi_coll_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_multi_coll_frames, "Transmitted multi coll frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_late_coll_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_late_coll_frames, "Transmitted late coll frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_excessive_coll_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_excessive_coll_frames, "Transmitted excessive coll frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_frag_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_frag_frames, "Transmitted frag frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_err", CTLFLAG_RD, &softc->tx_port_stats->tx_err, "Transmitted err"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_tagged_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_tagged_frames, "Transmitted tagged frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_dbl_tagged_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_dbl_tagged_frames, "Transmitted dbl tagged frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_runt_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_runt_frames, "Transmitted runt frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_fifo_underruns", CTLFLAG_RD, &softc->tx_port_stats->tx_fifo_underruns, "Transmitted fifo underruns"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri0", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri0, "Transmitted pfc ena frames pri0"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri1", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri1, "Transmitted pfc ena frames pri1"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri2", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri2, 
"Transmitted pfc ena frames pri2"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri3", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri3, "Transmitted pfc ena frames pri3"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri4", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri4, "Transmitted pfc ena frames pri4"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri5", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri5, "Transmitted pfc ena frames pri5"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri6", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri6, "Transmitted pfc ena frames pri6"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_pfc_ena_frames_pri7", CTLFLAG_RD, &softc->tx_port_stats->tx_pfc_ena_frames_pri7, "Transmitted pfc ena frames pri7"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_eee_lpi_events", CTLFLAG_RD, &softc->tx_port_stats->tx_eee_lpi_events, "Transmitted eee lpi events"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_eee_lpi_duration", CTLFLAG_RD, &softc->tx_port_stats->tx_eee_lpi_duration, "Transmitted eee lpi duration"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_llfc_logical_msgs", CTLFLAG_RD, &softc->tx_port_stats->tx_llfc_logical_msgs, "Transmitted llfc logical msgs"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_hcfc_msgs", CTLFLAG_RD, &softc->tx_port_stats->tx_hcfc_msgs, "Transmitted hcfc msgs"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_total_collisions", CTLFLAG_RD, &softc->tx_port_stats->tx_total_collisions, "Transmitted total collisions"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_bytes", CTLFLAG_RD, &softc->tx_port_stats->tx_bytes, "Transmitted bytes"); 
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_xthol_frames", CTLFLAG_RD, &softc->tx_port_stats->tx_xthol_frames, "Transmitted xthol frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_stat_discard", CTLFLAG_RD, &softc->tx_port_stats->tx_stat_discard, "Transmitted stat discard"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tx_stat_error", CTLFLAG_RD, &softc->tx_port_stats->tx_stat_error, "Transmitted stat error"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_64b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_64b_frames, "Received 64b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_65b_127b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_65b_127b_frames, "Received 65b 127b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_128b_255b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_128b_255b_frames, "Received 128b 255b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_256b_511b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_256b_511b_frames, "Received 256b 511b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_512b_1023b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_512b_1023b_frames, "Received 512b 1023b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_1024b_1518_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_1024b_1518_frames, "Received 1024b 1518 frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_good_vlan_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_good_vlan_frames, "Received good vlan frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_1519b_2047b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_1519b_2047b_frames, "Received 1519b 2047b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_2048b_4095b_frames", CTLFLAG_RD, 
&softc->rx_port_stats->rx_2048b_4095b_frames, "Received 2048b 4095b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_4096b_9216b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_4096b_9216b_frames, "Received 4096b 9216b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_9217b_16383b_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_9217b_16383b_frames, "Received 9217b 16383b frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_total_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_total_frames, "Received total frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_ucast_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_ucast_frames, "Received ucast frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_mcast_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_mcast_frames, "Received mcast frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_bcast_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_bcast_frames, "Received bcast frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_fcs_err_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_fcs_err_frames, "Received fcs err frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_ctrl_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_ctrl_frames, "Received ctrl frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pause_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_pause_frames, "Received pause frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_frames, "Received pfc frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_unsupported_opcode_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_unsupported_opcode_frames, "Received unsupported opcode frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, 
SYSCTL_CHILDREN(oid), OID_AUTO, "rx_unsupported_da_pausepfc_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_unsupported_da_pausepfc_frames, "Received unsupported da pausepfc frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_wrong_sa_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_wrong_sa_frames, "Received wrong sa frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_align_err_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_align_err_frames, "Received align err frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_oor_len_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_oor_len_frames, "Received oor len frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_code_err_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_code_err_frames, "Received code err frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_false_carrier_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_false_carrier_frames, "Received false carrier frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_ovrsz_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_ovrsz_frames, "Received ovrsz frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_jbr_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_jbr_frames, "Received jbr frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_mtu_err_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_mtu_err_frames, "Received mtu err frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_match_crc_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_match_crc_frames, "Received match crc frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_promiscuous_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_promiscuous_frames, "Received promiscuous frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_tagged_frames", 
CTLFLAG_RD, &softc->rx_port_stats->rx_tagged_frames, "Received tagged frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_double_tagged_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_double_tagged_frames, "Received double tagged frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_trunc_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_trunc_frames, "Received trunc frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_good_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_good_frames, "Received good frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri0", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri0, "Received pfc xon2xoff frames pri0"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri1", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri1, "Received pfc xon2xoff frames pri1"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri2", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri2, "Received pfc xon2xoff frames pri2"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri3", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri3, "Received pfc xon2xoff frames pri3"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri4", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri4, "Received pfc xon2xoff frames pri4"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri5", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri5, "Received pfc xon2xoff frames pri5"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri6", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri6, "Received pfc xon2xoff frames pri6"); 
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_xon2xoff_frames_pri7", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_xon2xoff_frames_pri7, "Received pfc xon2xoff frames pri7"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri0", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri0, "Received pfc ena frames pri0"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri1", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri1, "Received pfc ena frames pri1"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri2", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri2, "Received pfc ena frames pri2"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri3", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri3, "Received pfc ena frames pri3"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri4", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri4, "Received pfc ena frames pri4"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri5", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri5, "Received pfc ena frames pri5"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri6", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri6, "Received pfc ena frames pri6"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_pfc_ena_frames_pri7", CTLFLAG_RD, &softc->rx_port_stats->rx_pfc_ena_frames_pri7, "Received pfc ena frames pri7"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_sch_crc_err_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_sch_crc_err_frames, "Received sch crc err frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_undrsz_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_undrsz_frames, 
"Received undrsz frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_frag_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_frag_frames, "Received frag frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_eee_lpi_events", CTLFLAG_RD, &softc->rx_port_stats->rx_eee_lpi_events, "Received eee lpi events"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_eee_lpi_duration", CTLFLAG_RD, &softc->rx_port_stats->rx_eee_lpi_duration, "Received eee lpi duration"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_llfc_physical_msgs", CTLFLAG_RD, &softc->rx_port_stats->rx_llfc_physical_msgs, "Received llfc physical msgs"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_llfc_logical_msgs", CTLFLAG_RD, &softc->rx_port_stats->rx_llfc_logical_msgs, "Received llfc logical msgs"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_llfc_msgs_with_crc_err", CTLFLAG_RD, &softc->rx_port_stats->rx_llfc_msgs_with_crc_err, "Received llfc msgs with crc err"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_hcfc_msgs", CTLFLAG_RD, &softc->rx_port_stats->rx_hcfc_msgs, "Received hcfc msgs"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_hcfc_msgs_with_crc_err", CTLFLAG_RD, &softc->rx_port_stats->rx_hcfc_msgs_with_crc_err, "Received hcfc msgs with crc err"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_bytes", CTLFLAG_RD, &softc->rx_port_stats->rx_bytes, "Received bytes"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_runt_bytes", CTLFLAG_RD, &softc->rx_port_stats->rx_runt_bytes, "Received runt bytes"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_runt_frames", CTLFLAG_RD, &softc->rx_port_stats->rx_runt_frames, "Received runt frames"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_stat_discard", CTLFLAG_RD, 
&softc->rx_port_stats->rx_stat_discard, "Received stat discard"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "rx_stat_err", CTLFLAG_RD, &softc->rx_port_stats->rx_stat_err, "Received stat err"); return 0; } int bnxt_create_rx_sysctls(struct bnxt_softc *softc, int rxr) { struct sysctl_oid *oid; struct ctx_hw_stats *rx_stats = (void *)softc->rx_stats.idi_vaddr; char name[32]; char desc[64]; sprintf(name, "rxq%d", rxr); sprintf(desc, "receive queue %d", rxr); oid = SYSCTL_ADD_NODE(&softc->hw_stats, SYSCTL_CHILDREN(softc->hw_stats_oid), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, desc); if (!oid) return ENOMEM; SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "ucast_pkts", CTLFLAG_RD, &rx_stats[rxr].rx_ucast_pkts, "unicast packets received"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "mcast_pkts", CTLFLAG_RD, &rx_stats[rxr].rx_mcast_pkts, "multicast packets received"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "bcast_pkts", CTLFLAG_RD, &rx_stats[rxr].rx_bcast_pkts, "broadcast packets received"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "discard_pkts", CTLFLAG_RD, &rx_stats[rxr].rx_discard_pkts, "discarded receive packets"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "drop_pkts", CTLFLAG_RD, &rx_stats[rxr].rx_drop_pkts, "dropped receive packets"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "ucast_bytes", CTLFLAG_RD, &rx_stats[rxr].rx_ucast_bytes, "unicast bytes received"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "mcast_bytes", CTLFLAG_RD, &rx_stats[rxr].rx_mcast_bytes, "multicast bytes received"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "bcast_bytes", CTLFLAG_RD, &rx_stats[rxr].rx_bcast_bytes, "broadcast bytes received"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tpa_pkts", CTLFLAG_RD, &rx_stats[rxr].tpa_pkts, "TPA packets"); 
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tpa_bytes", CTLFLAG_RD, &rx_stats[rxr].tpa_bytes, "TPA bytes"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tpa_events", CTLFLAG_RD, &rx_stats[rxr].tpa_events, "TPA events"); SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO, "tpa_aborts", CTLFLAG_RD, &rx_stats[rxr].tpa_aborts, "TPA aborts"); return 0; } static char *bnxt_chip_type[] = { "ASIC", "FPGA", "Palladium", "Unknown" }; #define MAX_CHIP_TYPE 3 static int bnxt_package_ver_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; struct iflib_dma_info dma_data; char *pkglog = NULL; char *p; char *next; char unk[] = ""; char *buf = unk; int rc; int field; uint16_t ordinal = BNX_DIR_ORDINAL_FIRST; uint16_t index; uint32_t data_len; rc = bnxt_hwrm_nvm_find_dir_entry(softc, BNX_DIR_TYPE_PKG_LOG, &ordinal, BNX_DIR_EXT_NONE, &index, false, HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ, &data_len, NULL, NULL); dma_data.idi_vaddr = NULL; if (rc == 0 && data_len) { rc = iflib_dma_alloc(softc->ctx, data_len, &dma_data, BUS_DMA_NOWAIT); if (rc == 0) { rc = bnxt_hwrm_nvm_read(softc, index, 0, data_len, &dma_data); if (rc == 0) { pkglog = dma_data.idi_vaddr; /* NULL terminate (removes last \n) */ pkglog[data_len-1] = 0; /* Set p = start of last line */ p = strrchr(pkglog, '\n'); if (p == NULL) p = pkglog; /* Now find the correct tab delimited field */ for (field = 0, next = p, p = strsep(&next, "\t"); field < BNX_PKG_LOG_FIELD_IDX_PKG_VERSION && p; p = strsep(&next, "\t")) { field++; } if (field == BNX_PKG_LOG_FIELD_IDX_PKG_VERSION) buf = p; } } else dma_data.idi_vaddr = NULL; } rc = sysctl_handle_string(oidp, buf, 0, req); if (dma_data.idi_vaddr) iflib_dma_free(&dma_data); return rc; } static int bnxt_hwrm_min_ver_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; char buf[16]; uint8_t newver[3]; int rc; sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major, softc->ver_info->hwrm_min_minor, 
softc->ver_info->hwrm_min_update); rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; if (sscanf(buf, "%hhu.%hhu.%hhu%*c", &newver[0], &newver[1], &newver[2]) != 3) return EINVAL; softc->ver_info->hwrm_min_major = newver[0]; softc->ver_info->hwrm_min_minor = newver[1]; softc->ver_info->hwrm_min_update = newver[2]; bnxt_check_hwrm_version(softc); return rc; } int bnxt_create_ver_sysctls(struct bnxt_softc *softc) { struct bnxt_ver_info *vi = softc->ver_info; struct sysctl_oid *oid = vi->ver_oid; if (!oid) return ENOMEM; SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "hwrm_if", CTLFLAG_RD, vi->hwrm_if_ver, 0, "HWRM interface version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "driver_hwrm_if", CTLFLAG_RD, vi->driver_hwrm_if_ver, 0, "HWRM firmware version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "hwrm_fw", CTLFLAG_RD, vi->hwrm_fw_ver, 0, "HWRM firmware version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "mgmt_fw", CTLFLAG_RD, vi->mgmt_fw_ver, 0, "management firmware version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "netctrl_fw", CTLFLAG_RD, vi->netctrl_fw_ver, 0, "network control firmware version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "roce_fw", CTLFLAG_RD, vi->roce_fw_ver, 0, "RoCE firmware version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "phy", CTLFLAG_RD, vi->phy_ver, 0, "PHY version"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "hwrm_fw_name", CTLFLAG_RD, vi->hwrm_fw_name, 0, "HWRM firmware name"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "mgmt_fw_name", CTLFLAG_RD, vi->mgmt_fw_name, 0, "management firmware name"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "netctrl_fw_name", CTLFLAG_RD, vi->netctrl_fw_name, 0, "network control firmware name"); SYSCTL_ADD_STRING(&vi->ver_ctx, 
SYSCTL_CHILDREN(oid), OID_AUTO, "roce_fw_name", CTLFLAG_RD, vi->roce_fw_name, 0, "RoCE firmware name"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "phy_vendor", CTLFLAG_RD, vi->phy_vendor, 0, "PHY vendor name"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "phy_partnumber", CTLFLAG_RD, vi->phy_partnumber, 0, "PHY vendor part number"); SYSCTL_ADD_U16(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "chip_num", CTLFLAG_RD, &vi->chip_num, 0, "chip number"); SYSCTL_ADD_U8(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "chip_rev", CTLFLAG_RD, &vi->chip_rev, 0, "chip revision"); SYSCTL_ADD_U8(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "chip_metal", CTLFLAG_RD, &vi->chip_metal, 0, "chip metal number"); SYSCTL_ADD_U8(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "chip_bond_id", CTLFLAG_RD, &vi->chip_bond_id, 0, "chip bond id"); SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "chip_type", CTLFLAG_RD, vi->chip_type > MAX_CHIP_TYPE ? bnxt_chip_type[MAX_CHIP_TYPE] : bnxt_chip_type[vi->chip_type], 0, "RoCE firmware name"); SYSCTL_ADD_PROC(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "package_ver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, softc, 0, bnxt_package_ver_sysctl, "A", "currently installed package version"); SYSCTL_ADD_PROC(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "hwrm_min_ver", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_hwrm_min_ver_sysctl, "A", "minimum hwrm API vesion to support"); return 0; } int bnxt_create_nvram_sysctls(struct bnxt_nvram_info *ni) { struct sysctl_oid *oid = ni->nvm_oid; if (!oid) return ENOMEM; SYSCTL_ADD_U16(&ni->nvm_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "mfg_id", CTLFLAG_RD, &ni->mfg_id, 0, "manufacturer id"); SYSCTL_ADD_U16(&ni->nvm_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "device_id", CTLFLAG_RD, &ni->device_id, 0, "device id"); SYSCTL_ADD_U32(&ni->nvm_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "sector_size", CTLFLAG_RD, &ni->sector_size, 0, "sector size"); 
SYSCTL_ADD_U32(&ni->nvm_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "size", CTLFLAG_RD, &ni->size, 0, "nvram total size"); SYSCTL_ADD_U32(&ni->nvm_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "reserved_size", CTLFLAG_RD, &ni->reserved_size, 0, "total reserved space"); SYSCTL_ADD_U32(&ni->nvm_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "available_size", CTLFLAG_RD, &ni->available_size, 0, "total available space"); return 0; } static int bnxt_rss_key_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; char buf[HW_HASH_KEY_SIZE*2+1] = {0}; char *p; int i; int rc; for (p = buf, i=0; ivnic_info.rss_hash_key[i]); rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; if (strspn(buf, "0123456789abcdefABCDEF") != (HW_HASH_KEY_SIZE * 2)) return EINVAL; for (p = buf, i=0; ivnic_info.rss_hash_key[i]) != 1) return EINVAL; p += 2; } if (if_getdrvflags(iflib_get_ifp(softc->ctx)) & IFF_DRV_RUNNING) bnxt_hwrm_rss_cfg(softc, &softc->vnic_info, softc->vnic_info.rss_hash_type); return rc; } static const char *bnxt_hash_types[] = {"ipv4", "tcp_ipv4", "udp_ipv4", "ipv6", "tcp_ipv6", "udp_ipv6", NULL}; static int bnxt_get_rss_type_str_bit(char *str) { int i; for (i=0; bnxt_hash_types[i]; i++) if (strcmp(bnxt_hash_types[i], str) == 0) return i; return -1; } static int bnxt_rss_type_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; char buf[256] = {0}; char *p; char *next; int rc; int type; int bit; for (type = softc->vnic_info.rss_hash_type; type; type &= ~(1<= sizeof(bnxt_hash_types) / sizeof(const char *)) continue; if (type != softc->vnic_info.rss_hash_type) strcat(buf, ","); strcat(buf, bnxt_hash_types[bit]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (type = 0, next = buf, p = strsep(&next, " ,"); p; p = strsep(&next, " ,")) { bit = bnxt_get_rss_type_str_bit(p); if (bit == -1) return EINVAL; type |= 1<vnic_info.rss_hash_type) { softc->vnic_info.rss_hash_type = type; if 
(if_getdrvflags(iflib_get_ifp(softc->ctx)) & IFF_DRV_RUNNING) bnxt_hwrm_rss_cfg(softc, &softc->vnic_info, softc->vnic_info.rss_hash_type); } return rc; } static int bnxt_rx_stall_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = (bool)(softc->vnic_info.flags & BNXT_VNIC_FLAG_BD_STALL); rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; if (val) softc->vnic_info.flags |= BNXT_VNIC_FLAG_BD_STALL; else softc->vnic_info.flags &= ~BNXT_VNIC_FLAG_BD_STALL; if (if_getdrvflags(iflib_get_ifp(softc->ctx)) & IFF_DRV_RUNNING) rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info); return rc; } static int bnxt_vlan_strip_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = (bool)(softc->vnic_info.flags & BNXT_VNIC_FLAG_VLAN_STRIP); rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; if (val) softc->vnic_info.flags |= BNXT_VNIC_FLAG_VLAN_STRIP; else softc->vnic_info.flags &= ~BNXT_VNIC_FLAG_VLAN_STRIP; if (if_getdrvflags(iflib_get_ifp(softc->ctx)) & IFF_DRV_RUNNING) rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info); return rc; } static int bnxt_set_coal_rx_usecs(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->rx_coal_usecs; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->rx_coal_usecs = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_rx_frames(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->rx_coal_frames; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->rx_coal_frames = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_rx_usecs_irq(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return 
EBUSY; val = softc->rx_coal_usecs_irq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->rx_coal_usecs_irq = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_rx_frames_irq(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->rx_coal_frames_irq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->rx_coal_frames_irq = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_tx_usecs(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->tx_coal_usecs; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->tx_coal_usecs = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_tx_frames(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->tx_coal_frames; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->tx_coal_frames = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_tx_usecs_irq(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->tx_coal_usecs_irq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->tx_coal_usecs_irq = val; rc = bnxt_hwrm_set_coal(softc); return rc; } static int bnxt_set_coal_tx_frames_irq(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->tx_coal_frames_irq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; softc->tx_coal_frames_irq = val; rc = bnxt_hwrm_set_coal(softc); return rc; } int bnxt_create_config_sysctls_pre(struct bnxt_softc *softc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(softc->dev); struct sysctl_oid_list 
*children; - children = SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev));; + children = SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_rss_key_sysctl, "A", "RSS key"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rss_type", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_rss_type_sysctl, "A", "RSS type bits"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_stall", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_rx_stall_sysctl, "I", "buffer rx packets in hardware until the host posts new buffers"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "vlan_strip", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_vlan_strip_sysctl, "I", "strip VLAN tag in the RX path"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "if_name", CTLFLAG_RD, iflib_get_ifp(softc->ctx)->if_xname, 0, "interface name"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_usecs", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_rx_usecs, "I", "interrupt coalescing Rx Usecs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_frames", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_rx_frames, "I", "interrupt coalescing Rx Frames"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_usecs_irq", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_rx_usecs_irq, "I", "interrupt coalescing Rx Usecs IRQ"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_frames_irq", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_rx_frames_irq, "I", "interrupt coalescing Rx Frames IRQ"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_usecs", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_tx_usecs, "I", "interrupt coalescing Tx Usces"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_frames", CTLTYPE_INT | 
CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_tx_frames, "I", "interrupt coalescing Tx Frames"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_usecs_irq", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_tx_usecs_irq, "I", "interrupt coalescing Tx Usecs IRQ"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_frames_irq", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_set_coal_tx_frames_irq, "I", "interrupt coalescing Tx Frames IRQ"); return 0; } #define BNXT_HW_LRO_FN(fn_name, arg) \ static int \ fn_name(SYSCTL_HANDLER_ARGS) { \ struct bnxt_softc *softc = arg1; \ int rc; \ int val; \ \ if (softc == NULL) \ return EBUSY; \ \ val = softc->hw_lro.arg; \ rc = sysctl_handle_int(oidp, &val, 0, req); \ if (rc || !req->newptr) \ return rc; \ \ if ((if_getdrvflags(iflib_get_ifp(softc->ctx)) & IFF_DRV_RUNNING)) \ return EBUSY; \ \ softc->hw_lro.arg = val; \ bnxt_validate_hw_lro_settings(softc); \ rc = bnxt_hwrm_vnic_tpa_cfg(softc); \ \ return rc; \ } BNXT_HW_LRO_FN(bnxt_hw_lro_enable_disable, enable) BNXT_HW_LRO_FN(bnxt_hw_lro_set_mode, is_mode_gro) BNXT_HW_LRO_FN(bnxt_hw_lro_set_max_agg_segs, max_agg_segs) BNXT_HW_LRO_FN(bnxt_hw_lro_set_max_aggs, max_aggs) BNXT_HW_LRO_FN(bnxt_hw_lro_set_min_agg_len, min_agg_len) #define BNXT_FLOW_CTRL_FN(fn_name, arg) \ static int \ fn_name(SYSCTL_HANDLER_ARGS) { \ struct bnxt_softc *softc = arg1; \ int rc; \ int val; \ \ if (softc == NULL) \ return EBUSY; \ \ val = softc->link_info.flow_ctrl.arg; \ rc = sysctl_handle_int(oidp, &val, 0, req); \ if (rc || !req->newptr) \ return rc; \ \ if (val) \ val = 1; \ \ if (softc->link_info.flow_ctrl.arg != val) { \ softc->link_info.flow_ctrl.arg = val; \ rc = bnxt_hwrm_set_link_setting(softc, true, false, false);\ rc = bnxt_hwrm_port_phy_qcfg(softc); \ } \ \ return rc; \ } BNXT_FLOW_CTRL_FN(bnxt_flow_ctrl_tx, tx) BNXT_FLOW_CTRL_FN(bnxt_flow_ctrl_rx, rx) BNXT_FLOW_CTRL_FN(bnxt_flow_ctrl_autoneg, autoneg) int 
bnxt_create_pause_fc_sysctls(struct bnxt_softc *softc) { struct sysctl_oid *oid = softc->flow_ctrl_oid; if (!oid) return ENOMEM; SYSCTL_ADD_PROC(&softc->flow_ctrl_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "tx", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_flow_ctrl_tx, "A", "Enable or Disable Tx Flow Ctrl: 0 / 1"); SYSCTL_ADD_PROC(&softc->flow_ctrl_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "rx", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_flow_ctrl_rx, "A", "Enable or Disable Tx Flow Ctrl: 0 / 1"); SYSCTL_ADD_PROC(&softc->flow_ctrl_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "autoneg", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_flow_ctrl_autoneg, "A", "Enable or Disable Autoneg Flow Ctrl: 0 / 1"); return 0; } int bnxt_create_hw_lro_sysctls(struct bnxt_softc *softc) { struct sysctl_oid *oid = softc->hw_lro_oid; if (!oid) return ENOMEM; SYSCTL_ADD_PROC(&softc->hw_lro_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_hw_lro_enable_disable, "A", "Enable or Disable HW LRO: 0 / 1"); SYSCTL_ADD_PROC(&softc->hw_lro_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "gro_mode", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_hw_lro_set_mode, "A", "Set mode: 1 = GRO mode, 0 = RSC mode"); SYSCTL_ADD_PROC(&softc->hw_lro_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "max_agg_segs", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_hw_lro_set_max_agg_segs, "A", "Set Max Agg Seg Value (unit is Log2): " "0 (= 1 seg) / 1 (= 2 segs) / ... / 31 (= 2^31 segs)"); SYSCTL_ADD_PROC(&softc->hw_lro_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "max_aggs", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_hw_lro_set_max_aggs, "A", "Set Max Aggs Value (unit is Log2): " "0 (= 1 agg) / 1 (= 2 aggs) / ... 
/ 7 (= 2^7 segs)"); SYSCTL_ADD_PROC(&softc->hw_lro_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "min_agg_len", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_hw_lro_set_min_agg_len, "A", "Min Agg Len: 1 to 9000"); return 0; } static int bnxt_vlan_only_sysctl(SYSCTL_HANDLER_ARGS) { struct bnxt_softc *softc = arg1; int rc; int val; if (softc == NULL) return EBUSY; val = softc->vnic_info.vlan_only; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc || !req->newptr) return rc; if (val) val = 1; if (val != softc->vnic_info.vlan_only) { softc->vnic_info.vlan_only = val; if (if_getdrvflags(iflib_get_ifp(softc->ctx)) & IFF_DRV_RUNNING) rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); } return rc; } int bnxt_create_config_sysctls_post(struct bnxt_softc *softc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(softc->dev); struct sysctl_oid_list *children; - children = SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev));; + children = SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "vlan_only", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, bnxt_vlan_only_sysctl, "I", "require vlan tag on received packets when vlan is enabled"); return 0; } Index: head/sys/dev/bxe/bxe.c =================================================================== --- head/sys/dev/bxe/bxe.c (revision 359440) +++ head/sys/dev/bxe/bxe.c (revision 359441) @@ -1,19492 +1,19492 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define BXE_DRIVER_VERSION "1.78.91" #include "bxe.h" #include "ecore_sp.h" #include "ecore_init.h" #include "ecore_init_ops.h" #include "57710_int_offsets.h" #include "57711_int_offsets.h" #include "57712_int_offsets.h" /* * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these * explicitly here for older kernels that don't include this changeset. */ #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #define sysctl_handle_64 sysctl_handle_quad #endif /* * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these * here as zero(0) for older kernels that don't include this changeset * thereby masking the functionality. */ #ifndef CSUM_TCP_IPV6 #define CSUM_TCP_IPV6 0 #define CSUM_UDP_IPV6 0 #endif #define BXE_DEF_SB_ATT_IDX 0x0001 #define BXE_DEF_SB_IDX 0x0002 /* * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per * function HW initialization. 
*/ #define FLR_WAIT_USEC 10000 /* 10 msecs */ #define FLR_WAIT_INTERVAL 50 /* usecs */ #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ struct pbf_pN_buf_regs { int pN; uint32_t init_crd; uint32_t crd; uint32_t crd_freed; }; struct pbf_pN_cmd_regs { int pN; uint32_t lines_occup; uint32_t lines_freed; }; /* * PCI Device ID Table used by bxe_probe(). */ #define BXE_DEVDESC_MAX 64 static struct bxe_device_type bxe_devs[] = { { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57710 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711E 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { QLOGIC_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_2_20, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 2x20GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 MF 10GbE" }, { 0, 0, 0, 0, NULL } }; 
/* Malloc type for ILT (internal lookup table) pointer arrays. */
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);

/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* Netdump/debugnet support hooks. */
DEBUGNET_DEFINE(bxe);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
/* Protects the global bxe_prev_list below. */
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
/* One node per previously-loaded device instance (identified by bus/slot/path). */
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values...
*/

/* Root sysctl node: hw.bxe.* */
SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
/* NOTE(review): 0xffffffff stored in a signed int evaluates to -1; the code
 * appears to treat it as "unlimited" — confirm against consumers. Also note
 * this and max_aggregation_size use CTLFLAG_TUN (tunable only) while the
 * other knobs use CTLFLAG_RDTUN — verify whether that is intentional. */
static int bxe_rx_budget = 0xffffffff;
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen,
CTLFLAG_RDTUN, &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");

#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

/* Offsets are expressed in 32-bit words into the stats structures. */
#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

/*
 * Device-wide statistics export table: offset into bxe_eth_stats (in 32-bit
 * words), field width in bytes (8 = 64-bit hi/lo pair, 4 = 32-bit counter),
 * and whether the stat is per-port, per-function, or both.
 */
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes"},
    { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    /* driver/software counters below are 32-bit, per-function */
    { STATS_OFFSET32(rx_calls), 4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_bxe_service_rxsgl), 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
    { STATS_OFFSET32(rx_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
    { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached), 4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts), 4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors), 4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures), 4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full), 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth), 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure), 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth), 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std), 4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
    { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred), 4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff), 4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts), 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures), 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge), 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
    { STATS_OFFSET32(tx_queue_full_return), 4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
    { STATS_OFFSET32(tx_request_link_down_failures), 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
    { STATS_OFFSET32(bd_avail_too_less_failures), 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
    { STATS_OFFSET32(tx_mq_not_empty), 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
    { STATS_OFFSET32(nsegs_path1_errors), 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
    { STATS_OFFSET32(nsegs_path2_errors), 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
};

/*
 * Per-queue statistics export table: offset into bxe_eth_q_stats (in 32-bit
 * words), field width in bytes, and the sysctl-visible name.
 */
static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 8, "tpa_aggregations" },
    {
Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "tpa_bytes"},
    /* driver/software per-queue counters below are 32-bit */
    { Q_STATS_OFFSET32(rx_calls), 4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts), 4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts), 4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, "rx_erroneous_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 4, "rx_bxe_service_rxsgl"},
    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 4, "rx_jumbo_sge_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors), 4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors), 4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached), 4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts), 4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors), 4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso), 4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures), 4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full), 4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure), 4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std), 4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso"},
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred), 4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff), 4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts), 4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures), 4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx), 4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge), 4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa), 4, "mbuf_alloc_tpa"},
    { Q_STATS_OFFSET32(tx_queue_full_return), 4, "tx_queue_full_return"},
    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 4, "bxe_tx_mq_sc_state_failures"},
    { Q_STATS_OFFSET32(tx_request_link_down_failures), 4, "tx_request_link_down_failures"},
    { Q_STATS_OFFSET32(bd_avail_too_less_failures), 4, "bd_avail_too_less_failures"},
    { Q_STATS_OFFSET32(tx_mq_not_empty), 4, "tx_mq_not_empty"},
    { Q_STATS_OFFSET32(nsegs_path1_errors), 4, "nsegs_path1_errors"},
    { Q_STATS_OFFSET32(nsegs_path2_errors), 4, "nsegs_path2_errors"}
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)

/* Forward declarations for functions defined later in this file. */
static void    bxe_cmng_fns_init(struct bxe_softc *sc,
                                 uint8_t           read_cfg,
                                 uint8_t           cmng_type);
static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void    storm_memset_cmng(struct bxe_softc *sc,
                                 struct cmng_init  *cmng,
                                 uint8_t           port);
static void    bxe_set_reset_global(struct bxe_softc *sc);
static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
                                 int               engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t
bxe_chk_parity_attn(struct bxe_softc *sc,
                    uint8_t           *global,
                    uint8_t           print);
static void    bxe_int_disable(struct bxe_softc *sc);
static int     bxe_release_leader_lock(struct bxe_softc *sc);
static void    bxe_pf_disable(struct bxe_softc *sc);
static void    bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t            rx_bd_prod,
                                      uint16_t            rx_cq_prod,
                                      uint16_t            rx_sge_prod);
static void    bxe_link_report_locked(struct bxe_softc *sc);
static void    bxe_link_report(struct bxe_softc *sc);
static void    bxe_link_status_update(struct bxe_softc *sc);
static void    bxe_periodic_callout_func(void *xsc);
static void    bxe_periodic_start(struct bxe_softc *sc);
static void    bxe_periodic_stop(struct bxe_softc *sc);
static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                    uint16_t prev_index,
                                    uint16_t index);
static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                                     int                 queue);
static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                                     uint16_t            index);
static uint8_t bxe_txeof(struct bxe_softc *sc,
                         struct bxe_fastpath *fp);
static void    bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
                                     struct mbuf      *m,
                                     uint8_t          contents);
static int     bxe_alloc_mem(struct bxe_softc *sc);
static void    bxe_free_mem(struct bxe_softc *sc);
static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int     bxe_interrupt_attach(struct bxe_softc *sc);
static void    bxe_interrupt_detach(struct bxe_softc *sc);
static void    bxe_set_rx_mode(struct bxe_softc *sc);
static int     bxe_init_locked(struct bxe_softc *sc);
static int     bxe_stop_locked(struct bxe_softc *sc);
static void    bxe_sp_err_timeout_task(void *arg, int pending);
void           bxe_parity_recover(struct bxe_softc *sc);
void           bxe_handle_error(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc,
                                   int              load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t         unload_mode,
uint8_t keep_link); static void bxe_handle_sp_tq(void *context, int pending); static void bxe_handle_fp_tq(void *context, int pending); static int bxe_add_cdev(struct bxe_softc *sc); static void bxe_del_cdev(struct bxe_softc *sc); int bxe_grc_dump(struct bxe_softc *sc); static int bxe_alloc_buf_rings(struct bxe_softc *sc); static void bxe_free_buf_rings(struct bxe_softc *sc); /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, uint32_t crc32_seed, uint8_t complement) { uint32_t byte = 0; uint32_t bit = 0; uint8_t msb = 0; uint32_t temp = 0; uint32_t shft = 0; uint8_t current_byte = 0; uint32_t crc32_result = crc32_seed; const uint32_t CRC32_POLY = 0x1edc6f41; if ((crc32_packet == NULL) || (crc32_length == 0) || ((crc32_length % 8) != 0)) { return (crc32_result); } for (byte = 0; byte < crc32_length; byte = byte + 1) { current_byte = crc32_packet[byte]; for (bit = 0; bit < 8; bit = bit + 1) { /* msb = crc32_result[31]; */ msb = (uint8_t)(crc32_result >> 31); crc32_result = crc32_result << 1; /* it (msb != current_byte[bit]) */ if (msb != (0x1 & (current_byte >> bit))) { crc32_result = crc32_result ^ CRC32_POLY; /* crc32_result[0] = 1 */ crc32_result |= 1; } } } /* Last step is to: * 1. "mirror" every bit * 2. swap the 4 bytes * 3. 
complement each bit */ /* Mirror */ temp = crc32_result; shft = sizeof(crc32_result) * 8 - 1; for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) { temp <<= 1; temp |= crc32_result & 1; shft-- ; } /* temp[31-bit] = crc32_result[bit] */ temp <<= shft; /* Swap */ /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */ { uint32_t t0, t1, t2, t3; t0 = (0x000000ff & (temp >> 24)); t1 = (0x0000ff00 & (temp >> 8)); t2 = (0x00ff0000 & (temp << 8)); t3 = (0xff000000 & (temp << 24)); crc32_result = t0 | t1 | t2 | t3; } /* Complement */ if (complement) { crc32_result = ~crc32_result; } return (crc32_result); } int bxe_test_bit(int nr, volatile unsigned long *addr) { return ((atomic_load_acq_long(addr) & (1 << nr)) != 0); } void bxe_set_bit(unsigned int nr, volatile unsigned long *addr) { atomic_set_acq_long(addr, (1 << nr)); } void bxe_clear_bit(int nr, volatile unsigned long *addr) { atomic_clear_acq_long(addr, (1 << nr)); } int bxe_test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_cmpxchg(volatile int *addr, int old, int new) { int x; do { x = *addr; } while (atomic_cmpset_acq_int(addr, old, new) == 0); return (x); } /* * Get DMA memory from the OS. * * Validates that the OS has provided DMA buffers in response to a * bus_dmamap_load call and saves the physical address of those buffers. * When the callback is used the OS will return 0 for the mapping function * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any * failures back to the caller. * * Returns: * Nothing. 
*/
/* busdma load callback: record the single segment's physical address, or
 * zero the dma descriptor on error so callers can detect the failure. */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    /* guard against double allocation of the same descriptor */
    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    /* single page-aligned contiguous segment covering the whole block */
    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

/* Release a block previously set up by bxe_dma_alloc(); safe to call on a
 * zeroed descriptor (dma->size == 0 means nothing to free). */
void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}

/*
 * These indirect read and write routines are only during init.
 * The locking is handled by the MCP.
 */

/* Indirect GRC register write via PCI config space (address/data pair). */
void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    /* clear the address register afterwards */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

/* Indirect GRC register read via PCI config space (address/data pair). */
uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

/* Acquire a hardware resource lock shared between PFs; polls every 5 ms for
 * up to 5 seconds. Returns 0 on success, -1 on bad resource or timeout. */
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
            " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    /* functions 0-5 and 6-7 use different control register banks */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        /* writing the bit to the +4 register requests the lock;
         * re-reading the status confirms whether we got it */
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
        resource, resource_bit);
    return (-1);
}

/* Release a hardware resource lock previously taken with
 * bxe_acquire_hw_lock(). Returns 0 on success, -1 if not held. */
static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
            " resource_bit 0x%x\n", resource, resource_bit);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
              resource, lock_status, resource_bit);
        return (-1);
    }

    /* writing the bit to the base register releases the lock */
    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}

/* Take the driver PHY mutex, then the cross-PF MDIO hardware lock. */
static void bxe_acquire_phy_lock(struct bxe_softc *sc)
{
	BXE_PHY_LOCK(sc);
	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); 
}

/* Release in reverse order: hardware MDIO lock first, then the mutex. */
static void bxe_release_phy_lock(struct bxe_softc *sc)
{
	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); 
	BXE_PHY_UNLOCK(sc);
}
/*
 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform it's own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).*
 */
/* Take the cross-PF NVRAM hardware lock, then request the software
 * arbitration bit for this port; polls until granted or timeout. */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    /* poll the per-port arbitration-granted bit */
    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }
        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface "
            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
            port, val);
        return (-1);
    }

    return (0);
}

/* Drop the software arbitration bit for this port, wait for the hardware to
 * acknowledge, then release the cross-PF NVRAM hardware lock. */
static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    /* poll until the arbitration-granted bit clears */
    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }
        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface "
            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
            port, val);
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

/* Enable NVRAM read AND write access (both bits set even for reads). */
static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}
static void bxe_disable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* disable both bits, even after read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN))); } static int bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val, uint32_t cmd_flags) { int count, i, rc; uint32_t val; /* build the command word */ cmd_flags |= MCPR_NVM_COMMAND_DOIT; /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* address of the NVRAM to read from */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue a read command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ *ret_val = 0; rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { val = REG_RD(sc, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order * but ethtool sees it as an array of bytes * converting to big-endian will do the work */ *ret_val = htobe32(val); rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram read timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } static int bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; int rc; if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return 
(rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); /* read the first word(s) */ cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((buf_size > sizeof(uint32_t)) && (rc == 0)) { rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); /* advance to the next dword */ offset += sizeof(uint32_t); ret_buf += sizeof(uint32_t); buf_size -= sizeof(uint32_t); cmd_flags = 0; } if (rc == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val, uint32_t cmd_flags) { int count, i, rc; /* build the command word */ cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR); /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* write the data */ REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val); /* address of the NVRAM to write to */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue the write command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram write timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) static int bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t align_offset; uint32_t val; int rc; if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > 
flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); align_offset = (offset & ~0x03); rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); if (rc == 0) { val &= ~(0xff << BYTE_OFFSET(offset)); val |= (*data_buf << BYTE_OFFSET(offset)); /* nvram data is returned as an array of bytes * convert it back to cpu order */ val = be32toh(val); rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; uint32_t written_so_far; int rc; if (buf_size == 1) { return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); } if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if (buf_size == 0) { return (0); /* nothing to do */ } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); written_so_far = 0; cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((written_so_far < buf_size) && (rc == 0)) { if (written_so_far == (buf_size - sizeof(uint32_t))) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if ((offset % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_FIRST; } memcpy(&val, data_buf, 
4); rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); /* advance to the next dword */ offset += sizeof(uint32_t); data_buf += sizeof(uint32_t); written_so_far += sizeof(uint32_t); cmd_flags = 0; } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } /* copy command into DMAE command memory and set DMAE command Go */ void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx) { uint32_t cmd_offset; int i; cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx)); for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) { REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); } REG_WR(sc, dmae_reg_go_c[idx], 1); } uint32_t bxe_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) { return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) | DMAE_CMD_C_TYPE_ENABLE)); } uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode) { return (opcode & ~DMAE_CMD_SRC_RESET); } uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type, uint8_t dst_type, uint8_t with_comp, uint8_t comp_type) { uint32_t opcode = 0; opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) | (dst_type << DMAE_CMD_DST_SHIFT)); opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); opcode |= (SC_PORT(sc) ? 
DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) | (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT)); opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT); #ifdef __BIG_ENDIAN opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; #else opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; #endif if (with_comp) { opcode = bxe_dmae_opcode_add_comp(opcode, comp_type); } return (opcode); } static void bxe_prep_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae, uint8_t src_type, uint8_t dst_type) { memset(dmae, 0, sizeof(struct dmae_cmd)); /* set the opcode */ dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, TRUE, DMAE_COMP_PCI); /* fill in the completion parameters */ dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_val = DMAE_COMP_VAL; } /* issue a DMAE command over the init channel and wait for completion */ static int bxe_issue_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae) { uint32_t *wb_comp = BXE_SP(sc, wb_comp); int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000; BXE_DMAE_LOCK(sc); /* reset completion */ *wb_comp = 0; /* post the command on the channel used for initializations */ bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); /* wait for completion */ DELAY(5); while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!timeout || (sc->recovery_state != BXE_RECOVERY_DONE && sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_TIMEOUT); } timeout--; DELAY(50); } if (*wb_comp & DMAE_PCI_ERR_FLAG) { BLOGE(sc, "DMAE PCI error! 
*wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_PCI_ERROR); } BXE_DMAE_UNLOCK(sc); return (0); } void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32) { struct dmae_cmd dmae; uint32_t *data; int i, rc; DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); if (!sc->dmae_ready) { data = BXE_SP(sc, wb_data[0]); for (i = 0; i < len32; i++) { data[i] = (CHIP_IS_E1(sc)) ? bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : REG_RD(sc, (src_addr + (i * 4))); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); /* fill in addresses and len */ dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ dmae.src_addr_hi = 0; dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr, uint32_t len32) { struct dmae_cmd dmae; int rc; if (!sc->dmae_ready) { DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); if (CHIP_IS_E1(sc)) { ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } else { ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); /* fill in addresses and len */ dmae.src_addr_lo = U64_LO(dma_addr); dmae.src_addr_hi = U64_HI(dma_addr); dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ dmae.dst_addr_hi = 0; dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, 
uint32_t addr, uint32_t len) { int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); int offset = 0; while (len > dmae_wr_max) { bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ dmae_wr_max); offset += (dmae_wr_max * 4); len -= dmae_wr_max; } bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ len); } void bxe_set_ctx_validation(struct bxe_softc *sc, struct eth_context *cxt, uint32_t cid) { /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); /* xcontext validation */ cxt->xstorm_ag_context.cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } static void bxe_storm_memset_hc_timeout(struct bxe_softc *sc, uint8_t port, uint8_t fw_sb_id, uint8_t sb_index, uint8_t ticks) { uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); REG_WR8(sc, addr, ticks); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d ticks %d\n", port, fw_sb_id, sb_index, ticks); } static void bxe_storm_memset_hc_disable(struct bxe_softc *sc, uint8_t port, uint16_t fw_sb_id, uint8_t sb_index, uint8_t disable) { uint32_t enable_flag = (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); uint8_t flags; /* clear and set */ flags = REG_RD8(sc, addr); flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; REG_WR8(sc, addr, flags); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); } void bxe_update_coalesce_sb_index(struct bxe_softc *sc, uint8_t fw_sb_id, uint8_t sb_index, uint8_t disable, uint16_t usec) { int port = SC_PORT(sc); uint8_t ticks = (usec / 4); /* XXX ??? */ bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); disable = (disable) ? 1 : ((usec) ? 
0 : 1); bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); } void elink_cb_udelay(struct bxe_softc *sc, uint32_t usecs) { DELAY(usecs); } uint32_t elink_cb_reg_read(struct bxe_softc *sc, uint32_t reg_addr) { return (REG_RD(sc, reg_addr)); } void elink_cb_reg_write(struct bxe_softc *sc, uint32_t reg_addr, uint32_t val) { REG_WR(sc, reg_addr, val); } void elink_cb_reg_wb_write(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_WR_DMAE(sc, offset, wb_write, len); } void elink_cb_reg_wb_read(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_RD_DMAE(sc, offset, wb_write, len); } uint8_t elink_cb_path_id(struct bxe_softc *sc) { return (SC_PATH(sc)); } void elink_cb_event_log(struct bxe_softc *sc, const elink_log_id_t elink_log_id, ...) { /* XXX */ BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); } static int bxe_set_spio(struct bxe_softc *sc, int spio, uint32_t mode) { uint32_t spio_reg; /* Only 2 SPIOs are configurable */ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); switch (mode) { case MISC_SPIO_OUTPUT_LOW: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); /* clear FLOAT and set CLR */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_CLR_POS); break; case MISC_SPIO_OUTPUT_HIGH: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); /* clear FLOAT and set SET */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_SET_POS); break; case MISC_SPIO_INPUT_HI_Z: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); /* set FLOAT */ spio_reg |= (spio << MISC_SPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_SPIO, spio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); return (0); } 
static int bxe_gpio_read(struct bxe_softc *sc, int gpio_num, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, gpio_mask); return (-1); } /* read GPIO value */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); /* get the requested pin value */ return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; } static int bxe_gpio_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint32_t mode) { uint32_t gpio_reg; /* any port swapping should be handled by caller */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); /* set CLR */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); break; case 
MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); /* set SET */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); /* set FLOAT */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" " gpio_reg 0x%x\n", pins, mode, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (-1); } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_int_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO int */ gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: BLOGD(sc, DBG_PHY, "Clear GPIO INT %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: BLOGD(sc, DBG_PHY, "Set GPIO INT %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); bxe_release_hw_lock(sc, 
HW_LOCK_RESOURCE_GPIO); return (0); } uint32_t elink_cb_gpio_read(struct bxe_softc *sc, uint16_t gpio_num, uint8_t port) { return (bxe_gpio_read(sc, gpio_num, port)); } uint8_t elink_cb_gpio_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_write(sc, gpio_num, mode, port)); } uint8_t elink_cb_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint8_t mode) /* 0=low 1=high */ { return (bxe_gpio_mult_write(sc, pins, mode)); } uint8_t elink_cb_gpio_int_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_int_write(sc, gpio_num, mode, port)); } void elink_cb_notify_link_changed(struct bxe_softc *sc) { REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + (SC_FUNC(sc) * sizeof(uint32_t))), 1); } /* send the MCP a request, block until there is a reply */ uint32_t elink_cb_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t seq; uint32_t rc = 0; uint32_t cnt = 1; uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; BXE_FWMB_LOCK(sc); seq = ++sc->fw_seq; SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); BLOGD(sc, DBG_PHY, "wrote command 0x%08x to FW MB param 0x%08x\n", (command | seq), param); /* Let the FW do it's magic. GIve it up to 5 seconds... */ do { DELAY(delay * 1000); rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); BLOGD(sc, DBG_PHY, "[after %d ms] read 0x%x seq 0x%x from FW MB\n", cnt*delay, rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { rc &= FW_MSG_CODE_MASK; } else { /* Ruh-roh! 
*/ BLOGE(sc, "FW failed to respond!\n"); // XXX bxe_fw_dump(sc); rc = 0; } BXE_FWMB_UNLOCK(sc); return (rc); } static uint32_t bxe_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { return (elink_cb_fw_command(sc, command, param)); } static void __storm_memset_dma_mapping(struct bxe_softc *sc, uint32_t addr, bus_addr_t mapping) { REG_WR(sc, addr, U64_LO(mapping)); REG_WR(sc, (addr + 4), U64_HI(mapping)); } static void storm_memset_spq_addr(struct bxe_softc *sc, bus_addr_t mapping, uint16_t abs_fid) { uint32_t addr = (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); __storm_memset_dma_mapping(sc, addr, mapping); } static void storm_memset_vf_to_pf(struct bxe_softc *sc, uint16_t abs_fid, uint16_t pf_id) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); } static void storm_memset_func_en(struct bxe_softc *sc, uint16_t abs_fid, uint8_t enable) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); } static void storm_memset_eq_data(struct bxe_softc *sc, struct event_ring_data *eq_data, uint16_t pfid) { uint32_t addr; size_t size; addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); size = sizeof(struct event_ring_data); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); } static void storm_memset_eq_prod(struct bxe_softc *sc, uint16_t eq_prod, uint16_t pfid) { uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); REG_WR16(sc, addr, eq_prod); } /* * Post a slowpath command. 
 *
 * A slowpath command is used to propagate a configuration change through
 * the controller in a controlled manner, allowing each STORM processor and
 * other H/W blocks to phase in the change. The commands sent on the
 * slowpath are referred to as ramrods. Depending on the ramrod used the
 * completion of the ramrod will occur in different ways. Here's a
 * breakdown of ramrods and how they complete:
 *
 * RAMROD_CMD_ID_ETH_PORT_SETUP
 *   Used to setup the leading connection on a port. Completes on the
 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
 *
 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
 *   Used to setup an additional connection on a port. Completes on the
 *   RCQ of the multi-queue/RSS connection being initialized.
 *
 * RAMROD_CMD_ID_ETH_STAT_QUERY
 *   Used to force the storm processors to update the statistics database
 *   in host memory. This ramrod is sent on the leading connection CID and
 *   completes as an index increment of the CSTORM on the default status
 *   block.
 *
 * RAMROD_CMD_ID_ETH_UPDATE
 *   Used to update the state of the leading connection, usually to update
 *   the RSS indirection table. Completes on the RCQ of the leading
 *   connection. (Not currently used under FreeBSD until OS support becomes
 *   available.)
 *
 * RAMROD_CMD_ID_ETH_HALT
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the multi-queue/RSS connection being torn down. Don't
 *   use this on the leading connection.
 *
 * RAMROD_CMD_ID_ETH_SET_MAC
 *   Sets the Unicast/Broadcast/Multicast used by the port. Completes on
 *   the RCQ of the leading connection.
 *
 * RAMROD_CMD_ID_ETH_CFC_DEL
 *   Used when tearing down a connection prior to driver unload. Completes
 *   on the RCQ of the leading connection (since the current connection
 *   has been completely removed from controller memory).
 *
 * RAMROD_CMD_ID_ETH_PORT_DEL
 *   Used to tear down the leading connection prior to driver unload,
 *   typically fp[0].
Completes as an index increment of the CSTORM on the * default status block. * * RAMROD_CMD_ID_ETH_FORWARD_SETUP * Used for connection offload. Completes on the RCQ of the multi-queue * RSS connection that is being offloaded. (Not currently used under * FreeBSD.) * * There can only be one command pending per function. * * Returns: * 0 = Success, !0 = Failure. */ /* must be called under the spq lock */ static inline struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) { struct eth_spe *next_spe = sc->spq_prod_bd; if (sc->spq_prod_bd == sc->spq_last_bd) { /* wrap back to the first eth_spq */ sc->spq_prod_bd = sc->spq; sc->spq_prod_idx = 0; } else { sc->spq_prod_bd++; sc->spq_prod_idx++; } return (next_spe); } /* must be called under the spq lock */ static inline void bxe_sp_prod_update(struct bxe_softc *sc) { int func = SC_FUNC(sc); /* * Make sure that BD data is updated before writing the producer. * BD data is written to the memory, the producer is read from the * memory, thus we need a full memory barrier to ensure the ordering. */ mb(); REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), sc->spq_prod_idx); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); } /** * bxe_is_contextless_ramrod - check if the current command ends on EQ * * @cmd: command to check * @cmd_type: command type */ static inline int bxe_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { return (TRUE); } else { return (FALSE); } } /** * bxe_sp_post - place a single command on an SP ring * * @sc: driver handle * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 
* @cid: SW CID the command is related to * @data_hi: command private data address (high 32 bits) * @data_lo: command private data address (low 32 bits) * @cmd_type: command type (e.g. NONE, ETH) * * SP data is handled as if it's always an address pair, thus data fields are * not swapped to little endian in upper functions. Instead this function swaps * data as if it's two uint32 fields. */ int bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi, uint32_t data_lo, int cmd_type) { struct eth_spe *spe; uint16_t type; int common; common = bxe_is_contextless_ramrod(command, cmd_type); BXE_SP_LOCK(sc); if (common) { if (!atomic_load_acq_long(&sc->eq_spq_left)) { BLOGE(sc, "EQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } else { if (!atomic_load_acq_long(&sc->cq_spq_left)) { BLOGE(sc, "SPQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } spe = bxe_sp_get_next(sc); /* CID needs port number to be encoded int it */ spe->hdr.conn_and_cmd_data = htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid)); type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE; /* TBD: Check if it works for VFs */ type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) & SPE_HDR_T_FUNCTION_ID); spe->hdr.type = htole16(type); spe->data.update_data_addr.hi = htole32(data_hi); spe->data.update_data_addr.lo = htole32(data_lo); /* * It's ok if the actual decrement is issued towards the memory * somewhere between the lock and unlock. Thus no more explict * memory barrier is needed. */ if (common) { atomic_subtract_acq_long(&sc->eq_spq_left, 1); } else { atomic_subtract_acq_long(&sc->cq_spq_left, 1); } BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); BLOGD(sc, DBG_SP, "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", sc->spq_prod_idx, (uint32_t)U64_HI(sc->spq_dma.paddr), (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), command, common, HW_CID(sc, cid), data_hi, data_lo, type, atomic_load_acq_long(&sc->cq_spq_left), atomic_load_acq_long(&sc->eq_spq_left)); bxe_sp_prod_update(sc); BXE_SP_UNLOCK(sc); return (0); } /** * bxe_debug_print_ind_table - prints the indirection table configuration. * * @sc: driver hanlde * @p: pointer to rss configuration */ /* * FreeBSD Device probe function. * * Compares the device found to the driver's list of supported devices and * reports back to the bsd loader whether this is the right driver for the device. * This is the driver entry function called from the "kldload" command. * * Returns: * BUS_PROBE_DEFAULT on success, positive value on failure. */ static int bxe_probe(device_t dev) { struct bxe_device_type *t; char *descbuf; uint16_t did, sdid, svid, vid; /* Find our device structure */ t = bxe_devs; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); /* Look through the list of known devices for a match. */ while (t->bxe_name != NULL) { if ((vid == t->bxe_vid) && (did == t->bxe_did) && ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); if (descbuf == NULL) return (ENOMEM); /* Print out the device identity. 
*/ snprintf(descbuf, BXE_DEVDESC_MAX, "%s (%c%d) BXE v:%s\n", t->bxe_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf), BXE_DRIVER_VERSION); device_set_desc_copy(dev, descbuf); free(descbuf, M_TEMP); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bxe_init_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), "bxe%d_core_lock", sc->unit); sx_init(&sc->core_sx, sc->core_sx_name); #else snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), "bxe%d_core_lock", sc->unit); mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); #endif snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), "bxe%d_sp_lock", sc->unit); mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), "bxe%d_dmae_lock", sc->unit); mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), "bxe%d_phy_lock", sc->unit); mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), "bxe%d_fwmb_lock", sc->unit); mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), "bxe%d_print_lock", sc->unit); mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), "bxe%d_stats_lock", sc->unit); mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), "bxe%d_mcast_lock", sc->unit); mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); } static void bxe_release_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX sx_destroy(&sc->core_sx); #else if (mtx_initialized(&sc->core_mtx)) { mtx_destroy(&sc->core_mtx); } #endif if (mtx_initialized(&sc->sp_mtx)) { mtx_destroy(&sc->sp_mtx); } if (mtx_initialized(&sc->dmae_mtx)) { 
mtx_destroy(&sc->dmae_mtx);
    }
    if (mtx_initialized(&sc->port.phy_mtx)) {
        mtx_destroy(&sc->port.phy_mtx);
    }
    if (mtx_initialized(&sc->fwmb_mtx)) {
        mtx_destroy(&sc->fwmb_mtx);
    }
    if (mtx_initialized(&sc->print_mtx)) {
        mtx_destroy(&sc->print_mtx);
    }
    if (mtx_initialized(&sc->stats_mtx)) {
        mtx_destroy(&sc->stats_mtx);
    }
    if (mtx_initialized(&sc->mcast_mtx)) {
        mtx_destroy(&sc->mcast_mtx);
    }
}

/*
 * Administratively stop the transmit path: clearing all driver flags
 * tells the stack the driver is stopped and the TX queue is full, so
 * no further packets are handed down.
 */
static void
bxe_tx_disable(struct bxe_softc* sc)
{
    if_t ifp = sc->ifp;

    /* tell the stack the driver is stopped and TX queue is full */
    if (ifp != NULL) {
        if_setdrvflags(ifp, 0);
    }
}

/*
 * Write the driver heartbeat sequence number into the per-function
 * shared-memory mailbox so the management firmware can see that the
 * driver is alive.
 */
static void
bxe_drv_pulse(struct bxe_softc *sc)
{
    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
             sc->fw_drv_pulse_wr_seq);
}

/*
 * Return the number of TX buffer descriptors still available on the
 * fastpath ring: ring size minus the producer/consumer distance.  The
 * distance is computed as a signed 16-bit difference (SUB_S16) so the
 * result stays correct across 16-bit index wrap-around.
 */
static inline uint16_t
bxe_tx_avail(struct bxe_softc *sc,
             struct bxe_fastpath *fp)
{
    int16_t used;
    uint16_t prod;
    uint16_t cons;

    prod = fp->tx_bd_prod;
    cons = fp->tx_bd_cons;

    /* signed 16-bit difference handles producer/consumer wrap */
    used = SUB_S16(prod, cons);

    return (int16_t)(sc->tx_ring_size) - used;
}

/*
 * Non-zero when the hardware TX consumer index (read from the status
 * block) has advanced past the driver's packet consumer, i.e. there
 * are TX completions waiting to be reaped.
 */
static inline int
bxe_tx_queue_has_work(struct bxe_fastpath *fp)
{
    uint16_t hw_cons;

    mb(); /* status block fields can change */

    hw_cons = le16toh(*fp->tx_cons_sb);

    return (hw_cons != fp->tx_pkt_cons);
}

/* TRUE when this fastpath's (single) TX queue has completions pending */
static inline uint8_t
bxe_has_tx_work(struct bxe_fastpath *fp)
{
    /* expand this for multi-cos if ever supported */
    return (bxe_tx_queue_has_work(fp)) ?
TRUE : FALSE;
}

/*
 * Non-zero when the RX completion-queue consumer reported by the status
 * block differs from the driver's consumer, i.e. there are RX
 * completions to process.  A consumer value landing on the "next page"
 * element (RCQ_MAX) is skipped by bumping the index.
 */
static inline int
bxe_has_rx_work(struct bxe_fastpath *fp)
{
    uint16_t rx_cq_cons_sb;

    mb(); /* status block fields can change */

    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
        rx_cq_cons_sb++;

    return (fp->rx_cq_cons != rx_cq_cons_sb);
}

/*
 * Handle a slowpath ("ramrod") completion CQE seen on an RX ring.
 * Decodes the firmware ramrod command into the matching ecore queue
 * command, notifies the ecore queue state machine via complete_cmd(),
 * and on success credits one entry back to the slowpath completion
 * queue budget (sc->cq_spq_left).
 */
static void
bxe_sp_event(struct bxe_softc    *sc,
             struct bxe_fastpath *fp,
             union eth_rx_cqe    *rr_cqe)
{
    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    BLOGD(sc, DBG_SP,
          "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
          fp->index, cid, command, sc->state,
          rr_cqe->ramrod_cqe.ramrod_type);

    /* map the firmware ramrod id onto the ecore queue command */
    switch (command) {
    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
        drv_cmd = ECORE_Q_CMD_UPDATE;
        break;

    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP;
        break;

    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
        break;

    case (RAMROD_CMD_ID_ETH_HALT):
        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_HALT;
        break;

    case (RAMROD_CMD_ID_ETH_TERMINATE):
        /* NOTE(review): "teminate" typo is in the debug string only */
        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_TERMINATE;
        break;

    case (RAMROD_CMD_ID_ETH_EMPTY):
        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
        drv_cmd = ECORE_Q_CMD_EMPTY;
        break;

    default:
        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
              command, fp->index);
        return;
    }

    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
        /*
         * q_obj->complete_cmd() failure means that this was
         * an unexpected completion.
         *
         * In this case we don't want to increase the sc->spq_left
         * because apparently we haven't sent this command the first
         * place.
*/ // bxe_panic(sc, ("Unexpected SP completion\n")); return; } atomic_add_acq_long(&sc->cq_spq_left, 1); BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", atomic_load_acq_long(&sc->cq_spq_left)); } /* * The current mbuf is part of an aggregation. Move the mbuf into the TPA * aggregation queue, put an empty mbuf back onto the receive chain, and mark * the current aggregation queue as in-progress. */ static void bxe_tpa_start(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue, uint16_t cons, uint16_t prod, struct eth_fast_path_rx_cqe *cqe) { struct bxe_sw_rx_bd tmp_bd; struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; int max_agg_queues; struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; uint16_t index; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " "cons=%d prod=%d\n", fp->index, queue, cons, prod); max_agg_queues = MAX_AGG_QS(sc); KASSERT((queue < max_agg_queues), ("fp[%02d] invalid aggr queue (%d >= %d)!", fp->index, queue, max_agg_queues)); KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", fp->index, queue)); /* copy the existing mbuf and mapping from the TPA pool */ tmp_bd = tpa_info->bd; if (tmp_bd.m == NULL) { uint32_t *tmp; tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n", fp->index, queue, cons, prod); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); /* XXX Error handling? */ return; } /* change the TPA queue to the start state */ tpa_info->state = BXE_TPA_STATE_START; tpa_info->placement_offset = cqe->placement_offset; tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); tpa_info->vlan_tag = le16toh(cqe->vlan_tag); tpa_info->len_on_bd = le16toh(cqe->len_on_bd); fp->rx_tpa_queue_used |= (1 << queue); /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. 
Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ index = (sc->max_rx_bufs != RX_BD_USABLE) ? prod : cons; /* move the received mbuf and mapping to TPA pool */ tpa_info->bd = fp->rx_mbuf_chain[cons]; /* release any existing RX BD mbuf mappings */ if (cons != index) { rx_buf = &fp->rx_mbuf_chain[cons]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We get here when the maximum number of rx buffers is less than * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL * it out here without concern of a memory leak. */ fp->rx_mbuf_chain[cons].m = NULL; } /* update the Rx SW BD with the mbuf info from the TPA pool */ fp->rx_mbuf_chain[index] = tmp_bd; /* update the Rx BD with the empty mbuf phys address from the TPA pool */ rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); } /* * When a TPA aggregation is completed, loop through the individual mbufs * of the aggregation, combining them into a single mbuf which will be sent * up the stack. Refill all freed SGEs with mbufs as we go along. */ static int bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct mbuf *m, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { struct mbuf *m_frag; uint32_t frag_len, frag_size, i; uint16_t sge_idx; int rc = 0; int j; frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", fp->index, queue, tpa_info->len_on_bd, frag_size, pages); /* make sure the aggregated frame is not too big to handle */ if (pages > 8 * PAGES_PER_SGE) { uint32_t *tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! 
" "pkt_len=%d len_on_bd=%d frag_size=%d\n", fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), tpa_info->len_on_bd, frag_size); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); bxe_panic(sc, ("sge page count error\n")); return (EINVAL); } /* * Scan through the scatter gather list pulling individual mbufs into a * single mbuf for the host stack. */ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); /* * Firmware gives the indices of the SGE as if the ring is an array * (meaning that the "next" element will consume 2 indices). */ frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " "sge_idx=%d frag_size=%d frag_len=%d\n", fp->index, queue, i, j, sge_idx, frag_size, frag_len); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! */ return (rc); } /* update the fragment length */ m_frag->m_len = frag_len; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); fp->eth_q_stats.mbuf_alloc_sge--; /* update the TPA mbuf size and remaining fragment size */ m->m_pkthdr.len += frag_len; frag_size -= frag_len; } BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", fp->index, queue, frag_size); return (rc); } static inline void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) { int i, j; for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; for (j = 0; j < 2; j++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); idx--; } } } static inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) { /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); /* * Clear the two last indices in the page to 1. 
These are the indices that * correspond to the "next" element, hence will never be indicated and * should be removed from the calculations. */ bxe_clear_sge_mask_next_elems(fp); } static inline void bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx) { uint16_t last_max = fp->last_max_sge; if (SUB_S16(idx, last_max) > 0) { fp->last_max_sge = idx; } } static inline void bxe_update_sge_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t sge_len, union eth_sgl_or_raw_data *cqe) { uint16_t last_max, last_elem, first_elem; uint16_t delta = 0; uint16_t i; if (!sge_len) { return; } /* first mark all used pages */ for (i = 0; i < sge_len; i++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, RX_SGE(le16toh(cqe->sgl[i]))); } BLOGD(sc, DBG_LRO, "fp[%02d] fp_cqe->sgl[%d] = %d\n", fp->index, sge_len - 1, le16toh(cqe->sgl[sge_len - 1])); /* assume that the last SGE index is the biggest */ bxe_update_last_max_sge(fp, le16toh(cqe->sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; /* if ring is not full */ if (last_elem + 1 != first_elem) { last_elem++; } /* now update the prod */ for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { if (__predict_true(fp->sge_mask[i])) { break; } fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; delta += BIT_VEC64_ELEM_SZ; } if (delta > 0) { fp->rx_sge_prod += delta; /* clear page-end entries */ bxe_clear_sge_mask_next_elems(fp); } BLOGD(sc, DBG_LRO, "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", fp->index, fp->last_max_sge, fp->rx_sge_prod); } /* * The aggregation on the current TPA queue has completed. Pull the individual * mbuf fragments together into a single mbuf, perform all necessary checksum * calculations, and send the resuting mbuf to the stack. 
*/ static void bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { if_t ifp = sc->ifp; struct mbuf *m; int rc = 0; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", fp->index, queue, tpa_info->placement_offset, le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); m = tpa_info->bd.m; /* allocate a replacement before modifying existing mbuf */ rc = bxe_alloc_rx_tpa_mbuf(fp, queue); if (rc) { /* drop the frame and log an error */ fp->eth_q_stats.rx_soft_errors++; goto bxe_tpa_stop_exit; } /* we have a replacement, fixup the current mbuf */ m_adj(m, tpa_info->placement_offset); m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; /* mark the checksums valid (taken care of by the firmware) */ fp->eth_q_stats.rx_ofld_frames_csum_ip++; fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); /* aggregate all of the SGEs into a single mbuf */ rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); if (rc) { /* drop the packet and log an error */ fp->eth_q_stats.rx_soft_errors++; m_freem(m); } else { if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; m->m_flags |= M_VLANTAG; } /* assign packet to this interface interface */ if_setrcvif(m, ifp); /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); fp->eth_q_stats.rx_tpa_pkts++; /* pass the frame to the stack */ if_input(ifp, m); } /* we passed an mbuf up the stack or dropped the frame */ fp->eth_q_stats.mbuf_alloc_tpa--; bxe_tpa_stop_exit: fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; fp->rx_tpa_queue_used &= ~(1 << queue); } static uint8_t bxe_service_rxsgl( struct bxe_fastpath *fp, 
uint16_t len, uint16_t lenonbd, struct mbuf *m, struct eth_fast_path_rx_cqe *cqe_fp) { struct mbuf *m_frag; uint16_t frags, frag_len; uint16_t sge_idx = 0; uint16_t j; uint8_t i, rc = 0; uint32_t frag_size; /* adjust the mbuf */ m->m_len = lenonbd; frag_size = len - lenonbd; frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); m_frag->m_len = frag_len; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! */ return (rc); } fp->eth_q_stats.mbuf_alloc_sge--; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); frag_size -= frag_len; } bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); return rc; } static uint8_t bxe_rxeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; int rx_pkts = 0; int rc = 0; BXE_FP_RX_LOCK(fp); /* CQ "next element" is of the size of the regular element */ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { hw_cq_cons++; } bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_cq_cons = fp->rx_cq_cons; sw_cq_prod = fp->rx_cq_prod; /* * Memory barrier necessary as speculative reads of the rx * buffer can be ahead of the index in the status block */ rmb(); BLOGD(sc, DBG_RX, "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", fp->index, hw_cq_cons, sw_cq_cons); while (sw_cq_cons != hw_cq_cons) { struct bxe_sw_rx_bd *rx_buf = NULL; union eth_rx_cqe *cqe; struct eth_fast_path_rx_cqe *cqe_fp; uint8_t cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; uint16_t len, lenonbd, pad; struct mbuf *m = NULL; comp_ring_cons = RCQ(sw_cq_cons); bd_prod = 
RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); cqe = &fp->rcq_chain[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; BLOGD(sc, DBG_RX, "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " "BD prod=%d cons=%d CQE type=0x%x err=0x%x " "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", fp->index, hw_cq_cons, sw_cq_cons, bd_prod, bd_cons, CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32toh(cqe_fp->rss_hash_result), le16toh(cqe_fp->vlan_tag), le16toh(cqe_fp->pkt_len_or_gro_seg_len), le16toh(cqe_fp->len_on_bd)); /* is this a slowpath msg? */ if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { bxe_sp_event(sc, fp, cqe); goto next_cqe; } rx_buf = &fp->rx_mbuf_chain[bd_cons]; if (!CQE_TYPE_FAST(cqe_fp_type)) { struct bxe_sw_tpa_info *tpa_info; uint16_t frag_size, pages; uint8_t queue; if (CQE_TYPE_START(cqe_fp_type)) { bxe_tpa_start(sc, fp, cqe_fp->queue_index, bd_cons, bd_prod, cqe_fp); m = NULL; /* packet not ready yet */ goto next_rx; } KASSERT(CQE_TYPE_STOP(cqe_fp_type), ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->rx_tpa_info[queue]; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", fp->index, queue); frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - tpa_info->len_on_bd); pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; bxe_tpa_stop(sc, fp, tpa_info, queue, pages, &cqe->end_agg_cqe, comp_ring_cons); bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); goto next_cqe; } /* non TPA */ /* is this an error packet? 
*/ if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); fp->eth_q_stats.rx_soft_errors++; goto next_rx; } len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); lenonbd = le16toh(cqe_fp->len_on_bd); pad = cqe_fp->placement_offset; m = rx_buf->m; if (__predict_false(m == NULL)) { BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", bd_cons, fp->index); goto next_rx; } /* XXX double copy if packet length under a threshold */ /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, (sc->max_rx_bufs != RX_BD_USABLE) ? bd_prod : bd_cons); if (rc != 0) { /* we simply reuse the received mbuf and don't post it to the stack */ m = NULL; BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", fp->index, rc); fp->eth_q_stats.rx_soft_errors++; if (sc->max_rx_bufs != RX_BD_USABLE) { /* copy this consumer index to the producer index */ memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, sizeof(struct bxe_sw_rx_bd)); memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); } goto next_rx; } /* current mbuf was detached from the bd */ fp->eth_q_stats.mbuf_alloc_rx--; /* we allocated a replacement mbuf, fixup the current one */ m_adj(m, pad); m->m_pkthdr.len = m->m_len = len; if ((len > 60) && (len > lenonbd)) { fp->eth_q_stats.rx_bxe_service_rxsgl++; rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); if (rc) break; fp->eth_q_stats.rx_jumbo_sge_pkts++; } else if (lenonbd < len) { fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; } /* assign packet to this interface interface */ if_setrcvif(m, ifp); /* assume no hardware checksum has complated */ m->m_pkthdr.csum_flags = 0; /* validate checksum if offload enabled */ if (if_getcapenable(ifp) & IFCAP_RXCSUM) { /* check for a valid IP frame */ if 
(!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_ip++; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } } /* check for a valid TCP/UDP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xFFFF; m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } } /* if there is a VLAN tag then flag that info */ if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; m->m_flags |= M_VLANTAG; } /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); next_rx: bd_cons = RX_BD_NEXT(bd_cons); bd_prod = RX_BD_NEXT(bd_prod); bd_prod_fw = RX_BD_NEXT(bd_prod_fw); /* pass the frame to the stack */ if (__predict_true(m != NULL)) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rx_pkts++; if_input(ifp, m); } next_cqe: sw_cq_prod = RCQ_NEXT(sw_cq_prod); sw_cq_cons = RCQ_NEXT(sw_cq_cons); /* limit spinning on the queue */ if (rc != 0) break; if (rx_pkts == sc->rx_budget) { fp->eth_q_stats.rx_budget_reached++; break; } } /* while work to do */ fp->rx_bd_cons = bd_cons; fp->rx_bd_prod = bd_prod_fw; fp->rx_cq_cons = sw_cq_cons; fp->rx_cq_prod = sw_cq_prod; /* Update producers */ bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); fp->eth_q_stats.rx_pkts += rx_pkts; fp->eth_q_stats.rx_calls++; BXE_FP_RX_UNLOCK(fp); return (sw_cq_cons != hw_cq_cons); } static uint16_t bxe_free_tx_pkt(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t idx) { struct bxe_sw_tx_bd *tx_buf = 
&fp->tx_mbuf_chain[idx];
    struct eth_tx_start_bd *tx_start_bd;
    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
    uint16_t new_cons;
    int nbd;

    /* unmap the mbuf from non-paged memory */
    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);

    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
    nbd = le16toh(tx_start_bd->nbd) - 1;

    /* new BD consumer = first BD of this packet + its BD count */
    new_cons = (tx_buf->first_bd + nbd);

    /* free the mbuf */
    if (__predict_true(tx_buf->m != NULL)) {
        m_freem(tx_buf->m);
        fp->eth_q_stats.mbuf_alloc_tx--;
    } else {
        /* completion slot had no mbuf attached; record the anomaly */
        fp->eth_q_stats.tx_chain_lost_mbuf++;
    }

    tx_buf->m = NULL;
    tx_buf->first_bd = 0;

    return (new_cons);
}

/*
 * Transmit timeout watchdog.  The per-queue timer is armed by
 * bxe_txeof() while transmits are pending; each call here decrements
 * it, and when it reaches zero the queue is declared stuck: the
 * BXE_ERR_TXQ_STUCK error bit is set and the slowpath error-recovery
 * task is scheduled.  Returns 0 while healthy, -1 once recovery has
 * been triggered.
 */
static int
bxe_watchdog(struct bxe_softc    *sc,
             struct bxe_fastpath *fp)
{
    BXE_FP_TX_LOCK(fp);

    /* timer not armed, or armed but not yet expired */
    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
        BXE_FP_TX_UNLOCK(fp);
        return (0);
    }

    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
    BXE_FP_TX_UNLOCK(fp);
    BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
    taskqueue_enqueue_timeout(taskqueue_thread,
        &sc->sp_err_timeout_task, hz/10);

    return (-1);
}

/*
 * Process transmit completions for one fastpath: walk the packets the
 * status block reports complete, free their mbufs/BDs via
 * bxe_free_tx_pkt(), update the OACTIVE flag based on remaining BD
 * space, and arm/clear the TX watchdog.  Returns TRUE while transmits
 * are still pending.  Caller must hold the fastpath TX lock.
 */
static uint8_t
bxe_txeof(struct bxe_softc    *sc,
          struct bxe_fastpath *fp)
{
    if_t ifp = sc->ifp;
    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
    uint16_t tx_bd_avail;

    BXE_FP_TX_LOCK_ASSERT(fp);

    bd_cons = fp->tx_bd_cons;
    hw_cons = le16toh(*fp->tx_cons_sb);
    sw_cons = fp->tx_pkt_cons;

    while (sw_cons != hw_cons) {
        pkt_cons = TX_BD(sw_cons);

        BLOGD(sc, DBG_TX,
              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
              fp->index, hw_cons, sw_cons, pkt_cons);

        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);

        sw_cons++;
    }

    fp->tx_pkt_cons = sw_cons;
    fp->tx_bd_cons = bd_cons;

    BLOGD(sc, DBG_TX,
          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);

    mb();

    tx_bd_avail = bxe_tx_avail(sc, fp);

    /* throttle / unthrottle the stack based on available BD space */
    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
    } else {
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    }

    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
        /* reset the watchdog timer if
there are pending transmits */
        fp->watchdog_timer = BXE_TX_TIMEOUT;
        return (TRUE);
    } else {
        /* clear watchdog when there are no pending transmits */
        fp->watchdog_timer = 0;
        return (FALSE);
    }
}

/*
 * Wait (up to ~1 second per queue) for every fastpath TX queue to
 * drain, reaping completions as they arrive.  Panics if a queue fails
 * to drain in time.
 */
static void
bxe_drain_tx_queues(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i, count;

    /* wait until all TX fastpath tasks have completed */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        count = 1000; /* 1000 * DELAY(1000)us = ~1s per queue */

        while (bxe_has_tx_work(fp)) {
            BXE_FP_TX_LOCK(fp);
            bxe_txeof(sc, fp);
            BXE_FP_TX_UNLOCK(fp);

            if (count == 0) {
                BLOGE(sc, "Timeout waiting for fp[%d] "
                          "transmits to complete!\n", i);
                bxe_panic(sc, ("tx drain failure\n"));
                return;
            }

            count--;
            DELAY(1000);
            rmb();
        }
    }

    return;
}

/*
 * Delete all MAC addresses of the given type from the ecore vlan/mac
 * object.  When wait_for_comp is set, block until the ramrod completes.
 * Returns the (negative on error) ecore status code.
 */
static int
bxe_del_all_macs(struct bxe_softc          *sc,
                 struct ecore_vlan_mac_obj *mac_obj,
                 int                       mac_type,
                 uint8_t                   wait_for_comp)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    int rc;

    /* wait for completion of requested */
    if (wait_for_comp) {
        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    }

    /* Set the mac type of addresses we want to clear */
    bxe_set_bit(mac_type, &vlan_mac_flags);

    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
            rc, mac_type, wait_for_comp);
    }

    return (rc);
}

/*
 * Translate the driver-level RX mode (NONE / NORMAL / ALLMULTI /
 * PROMISC) into ecore RX and TX (internal switching) accept-flag masks.
 * Returns 0 on success, -1 for an unknown mode.
 */
static int
bxe_fill_accept_flags(struct bxe_softc *sc,
                      uint32_t         rx_mode,
                      unsigned long    *rx_accept_flags,
                      unsigned long    *tx_accept_flags)
{
    /* Clear the flags first */
    *rx_accept_flags = 0;
    *tx_accept_flags = 0;

    switch (rx_mode) {
    case BXE_RX_MODE_NONE:
        /*
         * 'drop all' supersedes any accept flags that may have been
         * passed to the function.
*/ break; case BXE_RX_MODE_NORMAL: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_ALLMULTI: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_PROMISC: /* * According to deffinition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. */ bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); if (IS_MF_SI(sc)) { bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); } else { bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); } break; default: BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); return (-1); } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ if (rx_mode != BXE_RX_MODE_NONE) { bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); } return (0); } static int bxe_set_q_rx_mode(struct bxe_softc *sc, uint8_t cl_id, unsigned long rx_mode_flags, unsigned long rx_accept_flags, unsigned long tx_accept_flags, unsigned long ramrod_flags) { struct ecore_rx_mode_ramrod_params ramrod_param; int 
rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* Prepare ramrod parameters */ ramrod_param.cid = 0; ramrod_param.cl_id = cl_id; ramrod_param.rx_mode_obj = &sc->rx_mode_obj; ramrod_param.func_id = SC_FUNC(sc); ramrod_param.pstate = &sc->sp_state; ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); ramrod_param.ramrod_flags = ramrod_flags; ramrod_param.rx_mode_flags = rx_mode_flags; ramrod_param.rx_accept_flags = rx_accept_flags; ramrod_param.tx_accept_flags = tx_accept_flags; rc = ecore_config_rx_mode(sc, &ramrod_param); if (rc < 0) { BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " "rx_accept_flags 0x%x tx_accept_flags 0x%x " "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); return (rc); } return (0); } static int bxe_set_storm_rx_mode(struct bxe_softc *sc) { unsigned long rx_mode_flags = 0, ramrod_flags = 0; unsigned long rx_accept_flags = 0, tx_accept_flags = 0; int rc; rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, &tx_accept_flags); if (rc) { return (rc); } bxe_set_bit(RAMROD_RX, &ramrod_flags); bxe_set_bit(RAMROD_TX, &ramrod_flags); /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, rx_accept_flags, tx_accept_flags, ramrod_flags)); } /* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_load_no_mcp(struct bxe_softc *sc) { int path = SC_PATH(sc); int port = SC_PORT(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]++; load_count[path][1 + port]++; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], 
load_count[path][1], load_count[path][2]);

    if (load_count[path][0] == 1) {
        return (FW_MSG_CODE_DRV_LOAD_COMMON);
    } else if (load_count[path][1 + port] == 1) {
        return (FW_MSG_CODE_DRV_LOAD_PORT);
    } else {
        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
    }
}

/*
 * Returns the "mcp load_code" according to the global load_count array
 * when running without management firmware (no MCP): decrements the
 * per-path and per-port counters and reports whether this was the last
 * function on the chip (COMMON), the last on the port (PORT), or
 * neither (FUNCTION).
 */
static int
bxe_nic_unload_no_mcp(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int path = SC_PATH(sc);

    BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);

    load_count[path][0]--;
    load_count[path][1 + port]--;

    BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
          path, load_count[path][0], load_count[path][1],
          load_count[path][2]);

    /* widest unload scope goes to the last function standing */
    if (load_count[path][0] == 0) {
        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
    } else if (load_count[path][1 + port] == 0) {
        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
    } else {
        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
    }
}

/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
static uint32_t
bxe_send_unload_req(struct bxe_softc *sc,
                    int              unload_mode)
{
    uint32_t reset_code = 0;

    /*
     * Select the UNLOAD request mode.
     * NOTE(review): both branches request WOL_DIS, so unload_mode has
     * no effect here -- confirm whether a WOL-enabled request was
     * intended for the non-NORMAL case before changing anything.
     */
    if (unload_mode == UNLOAD_NORMAL) {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
    } else {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
    }

    /* Send the request to the MCP */
    if (!BXE_NOMCP(sc)) {
        reset_code = bxe_fw_command(sc, reset_code, 0);
    } else {
        reset_code = bxe_nic_unload_no_mcp(sc);
    }

    return (reset_code);
}

/*
 * Send the UNLOAD_DONE command to the MCP; keep_link requests that the
 * MCP skip resetting the link.
 */
static void
bxe_send_unload_done(struct bxe_softc *sc,
                     uint8_t          keep_link)
{
    uint32_t reset_param =
        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

    /* Report UNLOAD_DONE to MCP */
    if (!BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
    }
}

/*
 * On the PMF, wait (polling, up to ~1s) for the ecore function state
 * machine to return to STARTED; a TX disable/enable transaction may be
 * in flight.
 */
static int
bxe_func_wait_started(struct bxe_softc *sc)
{
    int tout = 50;

    if (!sc->port.pmf) {
        return (0);
    }

    /*
     * (assumption: No Attention from MCP at this stage)
     * PMF probably in the middle of TX disable/enable transaction
     * 1.
Sync IRS for default SB * 2. Sync SP queue - this guarantees us that attention handling started * 3. Wait, that TX disable/enable transaction completes * * 1+2 guarantee that if DCBX attention was scheduled it already changed * pending bit of transaction from STARTED-->TX_STOPPED, if we already * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ /* XXX make sure default SB ISR is done */ /* need a way to synchronize an irq (intr_mtx?) */ /* XXX flush any work queues */ while (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED && tout--) { DELAY(20000); } if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit. */ struct ecore_func_state_params func_params = { NULL }; BLOGE(sc, "Unexpected function state! " "Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &sc->func_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); /* STARTED-->TX_STOPPED */ func_params.cmd = ECORE_F_CMD_TX_STOP; ecore_func_state_change(sc, &func_params); /* TX_STOPPED-->STARTED */ func_params.cmd = ECORE_F_CMD_TX_START; return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_stop_queue(struct bxe_softc *sc, int index) { struct bxe_fastpath *fp = &sc->fp[index]; struct ecore_queue_state_params q_params = { NULL }; int rc; BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); q_params.q_obj = &sc->sp_objs[fp->index].q_obj; /* We want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* Stop the primary connection: */ /* ...halt the connection */ q_params.cmd = ECORE_Q_CMD_HALT; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...terminate the connection */ q_params.cmd = ECORE_Q_CMD_TERMINATE; memset(&q_params.params.terminate, 0, 
sizeof(q_params.params.terminate)); q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...delete cfc entry */ q_params.cmd = ECORE_Q_CMD_CFC_DEL; memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; return (ecore_queue_state_change(sc, &q_params)); } /* wait for the outstanding SP commands */ static inline uint8_t bxe_wait_sp_comp(struct bxe_softc *sc, unsigned long mask) { unsigned long tmp; int tout = 5000; /* wait for 5 secs tops */ while (tout--) { mb(); if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { return (TRUE); } DELAY(1000); } mb(); tmp = atomic_load_acq_long(&sc->sp_state); if (tmp & mask) { BLOGE(sc, "Filtering completion timed out: " "sp_state 0x%lx, mask 0x%lx\n", tmp, mask); return (FALSE); } return (FALSE); } static int bxe_func_stop(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_STOP; /* * Try to stop the function the 'good way'. If it fails (in case * of a parity error during bxe_chip_cleanup()) and we are * not in a debug mode, perform a state transaction in order to * enable further HW_RESET transaction. */ rc = ecore_func_state_change(sc, &func_params); if (rc) { BLOGE(sc, "FUNC_STOP ramrod failed. 
" "Running a dry transaction (%d)\n", rc); bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_reset_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; /* Prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_RESET; func_params.params.hw_init.load_phase = load_code; return (ecore_func_state_change(sc, &func_params)); } static void bxe_int_disable_sync(struct bxe_softc *sc, int disable_hw) { if (disable_hw) { /* prevent the HW from sending interrupts */ bxe_int_disable(sc); } /* XXX need a way to synchronize ALL irqs (intr_mtx?) */ /* make sure all ISRs are done */ /* XXX make sure sp_task is not running */ /* cancel and flush work queues */ } static void bxe_chip_cleanup(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { int port = SC_PORT(sc); struct ecore_mcast_ramrod_params rparam = { NULL }; uint32_t reset_code; int i, rc = 0; bxe_drain_tx_queues(sc); /* give HW time to discard old tx messages */ DELAY(1000); /* Clean all ETH MACs */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); } /* Clean up UC list */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); if (rc < 0) { BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); } /* Disable LLH */ if (!CHIP_IS_E1(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } /* Set "drop all" to stop Rx */ /* * We need to take the BXE_MCAST_LOCK() here in order to prevent * a race between the completion code and this code. 
*/ BXE_MCAST_LOCK(sc); if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); } else { bxe_set_storm_rx_mode(sc); } /* Clean up multicast configuration */ rparam.mcast_obj = &sc->mcast_obj; rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } BXE_MCAST_UNLOCK(sc); // XXX bxe_iov_chip_cleanup(sc); /* * Send the UNLOAD_REQUEST to the MCP. This will return if * this function should perform FUNCTION, PORT, or COMMON HW * reset. */ reset_code = bxe_send_unload_req(sc, unload_mode); /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction */ rc = bxe_func_wait_started(sc); if (rc) { BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc); } /* * Close multi and leading connections * Completions for ramrods are collected in a synchronous way */ for (i = 0; i < sc->num_queues; i++) { if (bxe_stop_queue(sc, i)) { goto unload_error; } } /* * If SP settings didn't get completed so far - something * very wrong has happen. */ if (!bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc); } unload_error: rc = bxe_func_stop(sc); if (rc) { BLOGE(sc, "Function stop failed!(%d)\n", rc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Reset the chip */ rc = bxe_reset_hw(sc, reset_code); if (rc) { BLOGE(sc, "Hardware reset failed(%d)\n", rc); } /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, keep_link); } static void bxe_disable_close_the_gate(struct bxe_softc *sc) { uint32_t val; int port = SC_PORT(sc); BLOGD(sc, DBG_LOAD, "Disabling 'close the gates'\n"); if (CHIP_IS_E1(sc)) { uint32_t addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; val = REG_RD(sc, addr); val &= ~(0x300); REG_WR(sc, addr, val); } else { val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); } } /* * Cleans the object that have internal lists without sending * ramrods. Should be run when interrutps are disabled. */ static void bxe_squeeze_objects(struct bxe_softc *sc) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; struct ecore_mcast_ramrod_params rparam = { NULL }; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; int rc; /* Cleanup MACs' object first... */ /* Wait for completion of requested */ bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Perform a dry cleanup */ bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); /* Clean ETH primary MAC */ bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) { BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); } /* Cleanup UC list */ vlan_mac_flags = 0; bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) { BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); } /* Now clean mcast object... */ rparam.mcast_obj = &sc->mcast_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); /* Add a DEL command... 
*/ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } /* now wait until all pending commands are cleared */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); while (rc != 0) { if (rc < 0) { BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc); return; } rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); } } /* stop the controller */ static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { uint8_t global = FALSE; uint32_t val; int i; BXE_CORE_LOCK_ASSERT(sc); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); for (i = 0; i < sc->num_queues; i++) { struct bxe_fastpath *fp; fp = &sc->fp[i]; fp->watchdog_timer = 0; BXE_FP_TX_LOCK(fp); BXE_FP_TX_UNLOCK(fp); } BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n"); /* mark driver as unloaded in shmem2 */ if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); } if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { /* * We can get here if the driver has been unloaded * during parity error recovery and is either waiting for a * leader to complete or for other functions to unload and * then ifconfig down has been issued. In this case we want to * unload and let other functions to complete a recovery * process. */ sc->recovery_state = BXE_RECOVERY_DONE; sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n"); } BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x" " state = 0x%x\n", sc->recovery_state, sc->state); return (-1); } /* * Nothing to do during unload if previous bxe_nic_load() * did not completed successfully - all resourses are released. 
*/ if ((sc->state == BXE_STATE_CLOSED) || (sc->state == BXE_STATE_ERROR)) { return (0); } sc->state = BXE_STATE_CLOSING_WAITING_HALT; mb(); /* stop tx */ bxe_tx_disable(sc); sc->rx_mode = BXE_RX_MODE_NONE; /* XXX set rx mode ??? */ if (IS_PF(sc) && !sc->grcdump_done) { /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); bxe_stats_handle(sc, STATS_EVENT_STOP); bxe_save_statistics(sc); } /* wait till consumers catch up with producers in all queues */ bxe_drain_tx_queues(sc); /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ if (IS_VF(sc)) { ; /* bxe_vfpf_close_vf(sc); */ } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip */ if (!sc->grcdump_done) bxe_chip_cleanup(sc, unload_mode, keep_link); } else { /* Send the UNLOAD_REQUEST to the MCP */ bxe_send_unload_req(sc, unload_mode); /* * Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global * attention once gloabl blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, FALSE); } /* * At this stage no more interrupts will arrive so we may safely clean * the queue'able objects here in case they failed to get cleaned so far. */ if (IS_PF(sc)) { bxe_squeeze_objects(sc); } /* There should be no more pending SP commands at this stage */ sc->sp_state = 0; sc->port.pmf = 0; bxe_free_fp_buffers(sc); if (IS_PF(sc)) { bxe_free_mem(sc); } bxe_free_fw_stats_mem(sc); sc->state = BXE_STATE_CLOSED; /* * Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. 
*/ if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { bxe_set_reset_in_progress(sc); /* Set RESET_IS_GLOBAL if needed */ if (global) { bxe_set_reset_global(sc); } } /* * The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. */ if (IS_PF(sc) && !bxe_clear_pf_load(sc) && bxe_reset_is_done(sc, SC_PATH(sc))) { bxe_disable_close_the_gate(sc); } BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); bxe_link_report(sc); return (0); } /* * Called by the OS to set various media options (i.e. link, speed, etc.) when * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". */ static int bxe_ifmedia_update(struct ifnet *ifp) { struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); struct ifmedia *ifm; ifm = &sc->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return (EINVAL); } switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); } /* * Called by the OS to get the current media status (i.e. link, speed, etc.). */ static void bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct bxe_softc *sc = if_getsoftc(ifp); /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..." line if the IFM_AVALID flag is *NOT* set. So we need to set this flag unconditionally (irrespective of the admininistrative 'up/down' state of the interface) to ensure that that line is always displayed. */ ifmr->ifm_status = IFM_AVALID; /* Setup the default interface info. */ ifmr->ifm_active = IFM_ETHER; /* Report link down if the driver isn't running. 
*/ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ifmr->ifm_active |= IFM_NONE; BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__); BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n", __func__, sc->link_vars.link_up); return; } if (sc->link_vars.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_FDX; } else { ifmr->ifm_active |= IFM_NONE; BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n", __func__); return; } ifmr->ifm_active |= sc->media; return; } static void bxe_handle_chip_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; long work = atomic_load_acq_long(&sc->chip_tq_flags); switch (work) { case CHIP_TQ_REINIT: if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { /* restart the interface */ BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } break; default: break; } } /* * Handles any IOCTL calls from the operating system. 
* * Returns: * 0 = Success, >0 Failure */ static int bxe_ioctl(if_t ifp, u_long command, caddr_t data) { struct bxe_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; int mask = 0; int reinit = 0; int error = 0; int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); switch (command) { case SIOCSIFMTU: BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", ifr->ifr_mtu); if (sc->mtu == ifr->ifr_mtu) { /* nothing to change */ break; } if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", ifr->ifr_mtu, mtu_min, mtu_max); error = EINVAL; break; } atomic_store_rel_int((volatile unsigned int *)&sc->mtu, (unsigned long)ifr->ifr_mtu); /* atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), (unsigned long)ifr->ifr_mtu); XXX - Not sure why it needs to be atomic */ if_setmtu(ifp, ifr->ifr_mtu); reinit = 1; break; case SIOCSIFFLAGS: /* toggle the interface state up or down */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); BXE_CORE_LOCK(sc); /* check if the interface is up */ if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ bxe_set_rx_mode(sc); } else if(sc->state != BXE_STATE_DISABLED) { bxe_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); bxe_stop_locked(sc); } } BXE_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* add/delete multicast addresses */ BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); /* check if the interface is up */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ BXE_CORE_LOCK(sc); bxe_set_rx_mode(sc); BXE_CORE_UNLOCK(sc); } break; case SIOCSIFCAP: /* find out which capabilities have changed */ mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", mask); /* 
toggle the LRO capabilites enable flag */ if (mask & IFCAP_LRO) { if_togglecapenable(ifp, IFCAP_LRO); BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); reinit = 1; } /* toggle the TXCSUM checksum capabilites enable flag */ if (mask & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle the RXCSUM checksum capabilities enable flag */ if (mask & IFCAP_RXCSUM) { if_togglecapenable(ifp, IFCAP_RXCSUM); BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle TSO4 capabilities enabled flag */ if (mask & IFCAP_TSO4) { if_togglecapenable(ifp, IFCAP_TSO4); BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); } /* toggle TSO6 capabilities enabled flag */ if (mask & IFCAP_TSO6) { if_togglecapenable(ifp, IFCAP_TSO6); BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); } /* toggle VLAN_HWTSO capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); } /* toggle VLAN_HWCSUM capabilities enabled flag */ if (mask & IFCAP_VLAN_HWCSUM) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); error = EINVAL; } /* toggle VLAN_MTU capabilities enable flag */ if (mask & IFCAP_VLAN_MTU) { /* XXX investigate this... 
*/ BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWTAGGING capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTAGGING) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWFILTER capabilities enabled flag */ if (mask & IFCAP_VLAN_HWFILTER) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); error = EINVAL; } /* XXX not yet... * IFCAP_WOL_MAGIC */ break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* set/get interface media */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", (command & 0xff)); error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", (command & 0xff)); error = ether_ioctl(ifp, command, data); break; } if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { BLOGD(sc, DBG_LOAD | DBG_IOCTL, "Re-initializing hardware from IOCTL change\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } return (error); } static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents) { char * type; int i = 0; if (!(sc->debug & DBG_MBUF)) { return; } if (m == NULL) { BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); return; } while (m) { BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, (int)m->m_pkthdr.csum_flags, CSUM_BITS); } if (m->m_flags & M_EXT) { switch (m->m_ext.ext_type) { case EXT_CLUSTER: type = "EXT_CLUSTER"; break; case EXT_SFBUF: type = "EXT_SFBUF"; break; case EXT_JUMBOP: type = "EXT_JUMBOP"; break; case EXT_JUMBO9: type = "EXT_JUMBO9"; break; case EXT_JUMBO16: type = "EXT_JUMBO16"; break; case EXT_PACKET: 
type = "EXT_PACKET"; break; case EXT_MBUF: type = "EXT_MBUF"; break; case EXT_NET_DRV: type = "EXT_NET_DRV"; break; case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; case EXT_EXTREF: type = "EXT_EXTREF"; break; default: type = "UNKNOWN"; break; } BLOGD(sc, DBG_MBUF, "%02d: - m_ext: %p ext_size=%d type=%s\n", i, m->m_ext.ext_buf, m->m_ext.ext_size, type); } if (contents) { bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); } m = m->m_next; i++; } } /* * Checks to ensure the 13 bd sliding window is >= MSS for TSO. * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD * The headers comes in a separate bd in FreeBSD so 13-3=10. * Returns: 0 if OK to send, 1 if packet needs further defragmentation */ static int bxe_chktso_window(struct bxe_softc *sc, int nsegs, bus_dma_segment_t *segs, struct mbuf *m) { uint32_t num_wnds, wnd_size, wnd_sum; int32_t frag_idx, wnd_idx; unsigned short lso_mss; int defrag; defrag = 0; wnd_sum = 0; wnd_size = 10; num_wnds = nsegs - wnd_size; lso_mss = htole16(m->m_pkthdr.tso_segsz); /* * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the * first window sum of data while skipping the first assuming it is the * header in FreeBSD. 
*/ for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { wnd_sum += htole16(segs[frag_idx].ds_len); } /* check the first 10 bd window size */ if (wnd_sum < lso_mss) { return (1); } /* run through the windows */ for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { /* subtract the first mbuf->m_len of the last wndw(-header) */ wnd_sum -= htole16(segs[wnd_idx+1].ds_len); /* add the next mbuf len to the len of our new window */ wnd_sum += htole16(segs[frag_idx].ds_len); if (wnd_sum < lso_mss) { return (1); } } return (0); } static uint8_t bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, struct mbuf *m, uint32_t *parsing_data) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; int e_hlen, ip_hlen, l4_off; uint16_t proto; if (m->m_pkthdr.csum_flags == CSUM_IP) { /* no L4 checksum offload needed */ return (0); } /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 2); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = sizeof(struct ip6_hdr); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? 
*/ return (0); } /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ l4_off = (e_hlen + ip_hlen); *parsing_data |= (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; th = (struct tcphdr *)(ip + ip_hlen); /* th_off is number of 32-bit words */ *parsing_data |= ((th->th_off << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); return (l4_off + (th->th_off << 2)); /* entire header length */ } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; return (l4_off + sizeof(struct udphdr)); /* entire header length */ } else { /* XXX error stat ??? */ return (0); } } static uint8_t bxe_set_pbd_csum(struct bxe_fastpath *fp, struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; struct udphdr *uh = NULL; int e_hlen, ip_hlen; uint16_t proto; uint8_t hlen; uint16_t tmp_csum; uint32_t *tmp_uh; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 1); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 
(struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = (sizeof(struct ip6_hdr) >> 1); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } hlen = (e_hlen >> 1); /* note that rest of global_data is indirectly zeroed here */ if (m->m_flags & M_VLANTAG) { pbd->global_data = htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); } else { pbd->global_data = htole16(hlen); } pbd->ip_hlen_w = ip_hlen; hlen += pbd->ip_hlen_w; /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { th = (struct tcphdr *)(ip + (ip_hlen << 1)); /* th_off is number of 32-bit words */ hlen += (uint16_t)(th->th_off << 1); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { uh = (struct udphdr *)(ip + (ip_hlen << 1)); hlen += (sizeof(struct udphdr) / 2); } else { /* valid case as only CSUM_IP was set */ return (0); } pbd->total_hlen_w = htole16(hlen); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; pbd->tcp_pseudo_csum = ntohs(th->th_sum); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; /* * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP * checksums and does not know anything about the UDP header and where * the checksum field is located. It only knows about TCP. Therefore * we "lie" to the hardware for outgoing UDP packets w/ checksum * offload. Since the checksum field offset for TCP is 16 bytes and * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 * bytes less than the start of the UDP header. This allows the * hardware to write the checksum in the correct spot. But the * hardware will compute a checksum which includes the last 10 bytes * of the IP header. 
To correct this we tweak the stack computed * pseudo checksum by folding in the calculation of the inverse * checksum for those final 10 bytes of the IP header. This allows * the correct checksum to be computed by the hardware. */ /* set pointer 10 bytes before UDP header */ tmp_uh = (uint32_t *)((uint8_t *)uh - 10); /* calculate a pseudo header checksum over the first 10 bytes */ tmp_csum = in_pseudo(*tmp_uh, *(tmp_uh + 1), *(uint16_t *)(tmp_uh + 2)); pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); } return (hlen * 2); /* entire header length, number of bytes */ } static void bxe_set_pbd_lso_e2(struct mbuf *m, uint32_t *parsing_data) { *parsing_data |= ((m->m_pkthdr.tso_segsz << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS); /* XXX test for IPv6 with extension header... */ } static void bxe_set_pbd_lso(struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip = NULL; struct tcphdr *th = NULL; int e_hlen; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; /* get the IP and TCP header, with LSO entire header in first mbuf */ /* XXX assuming IPv4 */ ip = (struct ip *)(m->m_data + e_hlen); th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); pbd->tcp_send_seq = ntohl(th->th_seq); pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); #if 1 /* XXX IPv4 */ pbd->ip_id = ntohs(ip->ip_id); pbd->tcp_pseudo_csum = ntohs(in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP))); #else /* XXX IPv6 */ pbd->tcp_pseudo_csum = ntohs(in_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htons(IPPROTO_TCP))); #endif pbd->global_data |= htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); } /* * Encapsulte an mbuf cluster into the tx bd chain and makes the memory * visible to the controller. * * If an mbuf is submitted to this routine and cannot be given to the * controller (e.g. it has too many fragments) then the function may free * the mbuf and return to the caller. * * Returns: * 0 = Success, !0 = Failure * Note the side effect that an mbuf may be freed if it causes a problem. 
*/ static int bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) { bus_dma_segment_t segs[32]; struct mbuf *m0; struct bxe_sw_tx_bd *tx_buf; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ struct eth_tx_bd *tx_data_bd; struct eth_tx_bd *tx_total_pkt_size_bd; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_prod, pkt_prod, total_pkt_size; uint8_t mac_type; int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; struct bxe_softc *sc; uint16_t tx_bd_avail; struct ether_vlan_header *eh; uint32_t pbd_e2_parsing_data = 0; uint8_t hlen = 0; int tmp_bd; int i; sc = fp->sc; M_ASSERTPKTHDR(*m_head); m0 = *m_head; rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; tx_start_bd = NULL; tx_data_bd = NULL; tx_total_pkt_size_bd = NULL; /* get the H/W pointer for packets and BDs */ pkt_prod = fp->tx_pkt_prod; bd_prod = fp->tx_bd_prod; mac_type = UNICAST_ADDRESS; /* map the mbuf into the next open DMAable memory */ tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* mapping errors */ if(__predict_false(error != 0)) { fp->eth_q_stats.tx_dma_mapping_failure++; if (error == ENOMEM) { /* resource issue, try again later */ rc = ENOMEM; } else if (error == EFBIG) { /* possibly recoverable with defragmentation */ fp->eth_q_stats.mbuf_defrag_attempts++; m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; rc = error; } } } else { /* unknown, unrecoverable mapping error */ BLOGE(sc, "Unknown TX mapping error rc=%d\n", error); bxe_dump_mbuf(sc, m0, FALSE); rc = error; } goto bxe_tx_encap_continue; } tx_bd_avail = 
bxe_tx_avail(sc, fp); /* make sure there is enough room in the send queue */ if (__predict_false(tx_bd_avail < (nsegs + 2))) { /* Recoverable, try again later. */ fp->eth_q_stats.tx_hw_queue_full++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); rc = ENOMEM; goto bxe_tx_encap_continue; } /* capture the current H/W TX chain high watermark */ if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < (TX_BD_USABLE - tx_bd_avail))) { fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); } /* make sure it fits in the packet window */ if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { /* * The mbuf may be to big for the controller to handle. If the frame * is a TSO frame we'll need to do an additional check. */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { goto bxe_tx_encap_continue; /* OK to send */ } else { fp->eth_q_stats.tx_window_violation_tso++; } } else { fp->eth_q_stats.tx_window_violation_std++; } /* lets try to defragment this mbuf and remap it */ fp->eth_q_stats.mbuf_defrag_attempts++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; /* Ugh, just drop the frame... :( */ rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; /* No sense in trying to defrag/copy chain, drop it. 
:( */ rc = error; } else { /* if the chain is still too long then drop it */ if(m0->m_pkthdr.csum_flags & CSUM_TSO) { /* * in case TSO is enabled nsegs should be checked against * BXE_TSO_MAX_SEGMENTS */ if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); fp->eth_q_stats.nsegs_path1_errors++; rc = ENODEV; } } else { if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); fp->eth_q_stats.nsegs_path2_errors++; rc = ENODEV; } } } } } bxe_tx_encap_continue: /* Check for errors */ if (rc) { if (rc == ENOMEM) { /* recoverable try again later */ } else { fp->eth_q_stats.tx_soft_errors++; fp->eth_q_stats.mbuf_alloc_tx--; m_freem(*m_head); *m_head = NULL; } return (rc); } /* set flag according to packet type (UNICAST_ADDRESS is default) */ if (m0->m_flags & M_BCAST) { mac_type = BROADCAST_ADDRESS; } else if (m0->m_flags & M_MCAST) { mac_type = MULTICAST_ADDRESS; } /* store the mbuf into the mbuf ring */ tx_buf->m = m0; tx_buf->first_bd = fp->tx_bd_prod; tx_buf->flags = 0; /* prepare the first transmit (start) BD for the mbuf */ tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; BLOGD(sc, DBG_TX, "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); tx_start_bd->nbytes = htole16(segs[0].ds_len); total_pkt_size += tx_start_bd->nbytes; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); /* all frames have at least Start BD + Parsing BD */ nbds = nsegs + 1; tx_start_bd->nbd = htole16(nbds); if (m0->m_flags & M_VLANTAG) { tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); } else { /* vf tx, start bd must hold the 
ethertype for fw to enforce it */ if (IS_VF(sc)) { /* map ethernet header to find type and header length */ eh = mtod(m0, struct ether_vlan_header *); tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); } } /* * add a parsing BD from the chain. The parsing BD is always added * though it is only used for TSO and chksum */ bd_prod = TX_BD_NEXT(bd_prod); if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_IP) { fp->eth_q_stats.tx_ofld_frames_csum_ip++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_L4_CSUM); } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_IS_UDP | ETH_TX_BD_FLAGS_L4_CSUM); } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | ETH_TX_BD_FLAGS_IS_UDP); } } if (!CHIP_IS_E1x(sc)) { pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); } SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { uint16_t global_data = 0; pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); } SET_FLAG(global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); pbd_e1x->global_data |= htole16(global_data); } /* setup the parsing BD with TSO specific info */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { fp->eth_q_stats.tx_ofld_frames_lso++; 
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; if (__predict_false(tx_start_bd->nbytes > hlen)) { fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; /* split the first BD into header/data making the fw job easy */ nbds++; tx_start_bd->nbd = htole16(nbds); tx_start_bd->nbytes = htole16(hlen); bd_prod = TX_BD_NEXT(bd_prod); /* new transmit BD after the tx_parse_bd */ tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } BLOGD(sc, DBG_TX, "TSO split header size is %d (%x:%x) nbds %d\n", le16toh(tx_start_bd->nbytes), le32toh(tx_start_bd->addr_hi), le32toh(tx_start_bd->addr_lo), nbds); } if (!CHIP_IS_E1x(sc)) { bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); } else { bxe_set_pbd_lso(m0, pbd_e1x); } } if (pbd_e2_parsing_data) { pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); } /* prepare remaining BDs, start tx bd contains first seg/frag */ for (i = 1; i < nsegs ; i++) { bd_prod = TX_BD_NEXT(bd_prod); tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); tx_data_bd->nbytes = htole16(segs[i].ds_len); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } total_pkt_size += tx_data_bd->nbytes; } BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); if (tx_total_pkt_size_bd != NULL) { tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; } if (__predict_false(sc->debug & DBG_TX)) { tmp_bd = tx_buf->first_bd; for (i = 0; i < nbds; i++) { if (i == 0) { BLOGD(sc, DBG_TX, "TX Strt: %p bd=%d nbd=%d vlan=0x%x " "bd_flags=0x%x hdr_nbds=%d\n", tx_start_bd, tmp_bd, le16toh(tx_start_bd->nbd), le16toh(tx_start_bd->vlan_or_ethertype), tx_start_bd->bd_flags.as_bitfield, (tx_start_bd->general_data & 
ETH_TX_START_BD_HDR_NBDS)); } else if (i == 1) { if (pbd_e1x) { BLOGD(sc, DBG_TX, "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " "tcp_seq=%u total_hlen_w=%u\n", pbd_e1x, tmp_bd, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, le16toh(pbd_e1x->total_hlen_w)); } else { /* if (pbd_e2) */ BLOGD(sc, DBG_TX, "-> Parse: %p bd=%d dst=%02x:%02x:%02x " "src=%02x:%02x:%02x parsing_data=0x%x\n", pbd_e2, tmp_bd, pbd_e2->data.mac_addr.dst_hi, pbd_e2->data.mac_addr.dst_mid, pbd_e2->data.mac_addr.dst_lo, pbd_e2->data.mac_addr.src_hi, pbd_e2->data.mac_addr.src_mid, pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); } } if (i != 1) { /* skip parse db as it doesn't hold data */ tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; BLOGD(sc, DBG_TX, "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", tx_data_bd, tmp_bd, le16toh(tx_data_bd->nbytes), le32toh(tx_data_bd->addr_hi), le32toh(tx_data_bd->addr_lo)); } tmp_bd = TX_BD_NEXT(tmp_bd); } } BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); /* update TX BD producer index value for next TX */ bd_prod = TX_BD_NEXT(bd_prod); /* * If the chain of tx_bd's describing this frame is adjacent to or spans * an eth_tx_next_bd element then we need to increment the nbds value. */ if (TX_BD_IDX(bd_prod) < nbds) { nbds++; } /* don't allow reordering of writes for nbd and packets */ mb(); fp->tx_db.data.prod += nbds; /* producer points to the next free tx_bd at this point */ fp->tx_pkt_prod++; fp->tx_bd_prod = bd_prod; DOORBELL(sc, fp->index, fp->tx_db.raw); fp->eth_q_stats.tx_pkts++; /* Prevent speculative reads from getting ahead of the status block. */ bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_READ); /* Prevent speculative reads from getting ahead of the doorbell. 
*/ bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 0, 0, BUS_SPACE_BARRIER_READ); return (0); } static void bxe_tx_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp) { struct mbuf *m = NULL; int tx_count = 0; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); /* keep adding entries while there are frames to send */ while (!if_sendq_empty(ifp)) { /* * check for any frames to send * dequeue can still be NULL even if queue is not empty */ m = if_dequeue(ifp); if (__predict_false(m == NULL)) { break; } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */ if (__predict_false(bxe_tx_encap(fp, &m))) { fp->eth_q_stats.tx_encap_failures++; if (m != NULL) { /* mark the TX queue as full and return the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_queue_xoff++; } /* stop looking for more work */ break; } /* the frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners. 
*/ if_etherbpfmtap(ifp, m); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } } /* Legacy (non-RSS) dispatch routine */ static void bxe_tx_start(if_t ifp) { struct bxe_softc *sc; struct bxe_fastpath *fp; sc = if_getsoftc(ifp); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BLOGW(sc, "Interface not running, ignoring transmit request\n"); return; } if (!sc->link_vars.link_up) { BLOGW(sc, "Interface link is down, ignoring transmit request\n"); return; } fp = &sc->fp[0]; if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { fp->eth_q_stats.tx_queue_full_return++; return; } BXE_FP_TX_LOCK(fp); bxe_tx_start_locked(sc, ifp, fp); BXE_FP_TX_UNLOCK(fp); } static int bxe_tx_mq_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp, struct mbuf *m) { struct buf_ring *tx_br = fp->tx_br; struct mbuf *next; int depth, rc, tx_count; uint16_t tx_bd_avail; rc = tx_count = 0; BXE_FP_TX_LOCK_ASSERT(fp); if (sc->state != BXE_STATE_OPEN) { fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; return ENETDOWN; } if (!tx_br) { BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); return (EINVAL); } if (m != NULL) { rc = drbr_enqueue(ifp, tx_br, m); if (rc != 0) { fp->eth_q_stats.tx_soft_errors++; goto bxe_tx_mq_start_locked_exit; } } if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { fp->eth_q_stats.tx_request_link_down_failures++; goto bxe_tx_mq_start_locked_exit; } /* fetch the depth of the driver queue */ depth = drbr_inuse_drv(ifp, tx_br); if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { fp->eth_q_stats.tx_max_drbr_queue_depth = depth; } /* keep adding entries while there are frames to send */ 
while ((next = drbr_peek(ifp, tx_br)) != NULL) { /* handle any completions if we're running low */ tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) { fp->eth_q_stats.bd_avail_too_less_failures++; m_freem(next); drbr_advance(ifp, tx_br); rc = ENOBUFS; break; } } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */ rc = bxe_tx_encap(fp, &next); if (__predict_false(rc != 0)) { fp->eth_q_stats.tx_encap_failures++; if (next != NULL) { /* mark the TX queue as full and save the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); drbr_putback(ifp, tx_br, next); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_frames_deferred++; } else drbr_advance(ifp, tx_br); /* stop looking for more work */ break; } /* the transmit frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners */ if_etherbpfmtap(ifp, next); drbr_advance(ifp, tx_br); } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } bxe_tx_mq_start_locked_exit: /* If we didn't drain the drbr, enqueue a task in the future to do it. */ if (!drbr_empty(ifp, tx_br)) { fp->eth_q_stats.tx_mq_not_empty++; taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); } return (rc); } static void bxe_tx_mq_start_deferred(void *arg, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)arg; struct bxe_softc *sc = fp->sc; if_t ifp = sc->ifp; BXE_FP_TX_LOCK(fp); bxe_tx_mq_start_locked(sc, ifp, fp, NULL); BXE_FP_TX_UNLOCK(fp); } /* Multiqueue (TSS) dispatch routine. 
*/ static int bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; int fp_index, rc; fp_index = 0; /* default is the first queue */ /* check if flowid is set */ if (BXE_VALID_FLOWID(m)) fp_index = (m->m_pkthdr.flowid % sc->num_queues); fp = &sc->fp[fp_index]; if (sc->state != BXE_STATE_OPEN) { fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; return ENETDOWN; } if (BXE_FP_TX_TRYLOCK(fp)) { rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); BXE_FP_TX_UNLOCK(fp); } else { rc = drbr_enqueue(ifp, fp->tx_br, m); taskqueue_enqueue(fp->tq, &fp->tx_task); } return (rc); } static void bxe_mq_flush(struct ifnet *ifp) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; struct mbuf *m; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->state != BXE_FP_STATE_IRQ) { BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", fp->index, fp->state); continue; } if (fp->tx_br != NULL) { BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { m_freem(m); } BXE_FP_TX_UNLOCK(fp); } } if_qflush(ifp); } static uint16_t bxe_cid_ilt_lines(struct bxe_softc *sc) { if (IS_SRIOV(sc)) { return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); } return (L2_ILT_LINES(sc)); } static void bxe_ilt_set_info(struct bxe_softc *sc) { struct ilt_client_info *ilt_client; struct ecore_ilt *ilt = sc->ilt; uint16_t line = 0; ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); /* CDU */ ilt_client = &ilt->clients[ILT_CLIENT_CDU]; ilt_client->client_num = ILT_CLIENT_CDU; ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bxe_cid_ilt_lines(sc); if (CNIC_SUPPORT(sc)) { line += CNIC_ILT_LINES; } ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[CDU]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw 
psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* QM */ if (QM_INIT(sc->qm_cid_count)) { ilt_client = &ilt->clients[ILT_CLIENT_QM]; ilt_client->client_num = ILT_CLIENT_QM; ilt_client->page_size = QM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; /* 4 bytes for each cid */ line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ); ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[QM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } if (CNIC_SUPPORT(sc)) { /* SRC */ ilt_client = &ilt->clients[ILT_CLIENT_SRC]; ilt_client->client_num = ILT_CLIENT_SRC; ilt_client->page_size = SRC_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += SRC_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[SRC]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* TM */ ilt_client = &ilt->clients[ILT_CLIENT_TM]; ilt_client->client_num = ILT_CLIENT_TM; ilt_client->page_size = TM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += TM_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[TM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); } static void bxe_set_fp_rx_buf_size(struct bxe_softc *sc) { int i; uint32_t rx_buf_size; rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); for (i = 0; i < sc->num_queues; i++) { if(rx_buf_size <= MCLBYTES){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size 
<= MJUMPAGESIZE){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ sc->fp[i].rx_buf_size = MJUMPAGESIZE; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else { sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; } } } static int bxe_alloc_ilt_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt = (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static int bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt->lines = (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static void bxe_free_ilt_mem(struct bxe_softc *sc) { if (sc->ilt != NULL) { free(sc->ilt, M_BXE_ILT); sc->ilt = NULL; } } static void bxe_free_ilt_lines_mem(struct bxe_softc *sc) { if (sc->ilt->lines != NULL) { free(sc->ilt->lines, M_BXE_ILT); sc->ilt->lines = NULL; } } static void bxe_free_mem(struct bxe_softc *sc) { int i; for (i = 0; i < L2_ILT_LINES(sc); i++) { bxe_dma_free(sc, &sc->context[i].vcxt_dma); sc->context[i].vcxt = NULL; sc->context[i].size = 0; } ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); bxe_free_ilt_lines_mem(sc); } static int bxe_alloc_mem(struct bxe_softc *sc) { int context_size; int allocated; int i; /* * Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT * functions because CDU differs in few aspects: * 1. There can be multiple entities allocating memory for context - * regular L2, CNIC, and SRIOV drivers. Each separately controls * its own ILT lines. * 2. Since CDU page-size is not a single 4KB page (which is the case * for the other ILT clients), to be efficient we want to support * allocation of sub-page-size in the last entry. * 3. 
 Context pointers are used by the driver to pass to FW / update
     * the context (for the other ILT clients the pointers are used just to
     * free the memory during unload).
     */
    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));

    /* carve the total context area into CDU-page-sized DMA chunks */
    for (i = 0, allocated = 0; allocated < context_size; i++) {
        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
                                  (context_size - allocated));

        if (bxe_dma_alloc(sc, sc->context[i].size,
                          &sc->context[i].vcxt_dma,
                          "cdu context") != 0) {
            /* partial failure: release everything allocated so far */
            bxe_free_mem(sc);
            return (-1);
        }

        sc->context[i].vcxt =
            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;

        allocated += sc->context[i].size;
    }

    /*
     * NOTE(review): the return value of bxe_alloc_ilt_lines_mem() is
     * ignored; on allocation failure sc->ilt->lines stays NULL here —
     * confirm ecore_ilt_mem_op() tolerates that before relying on it.
     */
    bxe_alloc_ilt_lines_mem(sc);

    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
          sc->ilt, sc->ilt->start_line, sc->ilt->lines);

    /* dump the four ILT client layouts (CDU/QM/SRC/TM) for debugging */
    {
        for (i = 0; i < 4; i++) {
            BLOGD(sc, DBG_LOAD,
                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
                  i,
                  sc->ilt->clients[i].page_size,
                  sc->ilt->clients[i].start,
                  sc->ilt->clients[i].end,
                  sc->ilt->clients[i].client_num,
                  sc->ilt->clients[i].flags);
        }
    }

    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
        bxe_free_mem(sc);
        return (-1);
    }

    return (0);
}

/*
 * Free every mbuf in the fastpath RX BD chain and unload its DMA map.
 * Safe to call when the chain was never populated (tag is NULL).
 */
static void
bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i;

    sc = fp->sc;

    if (fp->rx_mbuf_tag == NULL) {
        return;
    }

    /* free all mbufs and unload all maps */
    for (i = 0; i < RX_BD_TOTAL; i++) {
        if (fp->rx_mbuf_chain[i].m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map);
        }

        if (fp->rx_mbuf_chain[i].m != NULL) {
            m_freem(fp->rx_mbuf_chain[i].m);
            fp->rx_mbuf_chain[i].m = NULL;
            /* keep the leak-detection counter in sync */
            fp->eth_q_stats.mbuf_alloc_rx--;
        }
    }
}

/*
 * Free every mbuf in the fastpath TPA (LRO aggregation) pool and unload
 * its DMA map. Safe to call when the pool was never populated.
 */
static void
bxe_free_tpa_pool(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i, max_agg_queues;

    sc = fp->sc;

    if (fp->rx_mbuf_tag == NULL) {
        return;
    }

    max_agg_queues = MAX_AGG_QS(sc);

    /* release all mbufs and unload all DMA maps in the TPA pool */
    for (i = 0; i < max_agg_queues; i++) {
        if
(fp->rx_tpa_info[i].bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map); } if (fp->rx_tpa_info[i].bd.m != NULL) { m_freem(fp->rx_tpa_info[i].bd.m); fp->rx_tpa_info[i].bd.m = NULL; fp->eth_q_stats.mbuf_alloc_tpa--; } } } static void bxe_free_sge_chain(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i; sc = fp->sc; if (fp->rx_sge_mbuf_tag == NULL) { return; } /* rree all mbufs and unload all maps */ for (i = 0; i < RX_SGE_TOTAL; i++) { if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map); } if (fp->rx_sge_mbuf_chain[i].m != NULL) { m_freem(fp->rx_sge_mbuf_chain[i].m); fp->rx_sge_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_sge--; } } } static void bxe_free_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tx_br != NULL) { /* just in case bxe_mq_flush() wasn't called */ if (mtx_initialized(&fp->tx_mtx)) { struct mbuf *m; BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) m_freem(m); BXE_FP_TX_UNLOCK(fp); } } /* free all RX buffers */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); if (fp->eth_q_stats.mbuf_alloc_rx != 0) { BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_rx); } if (fp->eth_q_stats.mbuf_alloc_sge != 0) { BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_sge); } if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tpa); } if (fp->eth_q_stats.mbuf_alloc_tx != 0) { BLOGE(sc, "failed to release tx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tx); } /* XXX verify all mbufs were reclaimed */ } } static int 
bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                     uint16_t            prev_index,
                     uint16_t            index)
{
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs, rc;

    rc = 0;

    /* allocate the new RX BD mbuf */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_rx++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /*
     * map the mbuf into non-paged pool; the spare map is used so the
     * existing buffer at 'index' stays mapped if this load fails
     */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
        m_freem(m);
        fp->eth_q_stats.mbuf_alloc_rx--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    /* release any existing RX BD mbuf mappings */

    if (prev_index != index) {
        rx_buf = &fp->rx_mbuf_chain[prev_index];

        if (rx_buf->m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
        }

        /*
         * We only get here from bxe_rxeof() when the maximum number
         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
         * holds the mbuf in the prev_index so it's OK to NULL it out
         * here without concern of a memory leak.
         */
        fp->rx_mbuf_chain[prev_index].m = NULL;
    }

    rx_buf = &fp->rx_mbuf_chain[index];

    if (rx_buf->m_map != NULL) {
        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
    }

    /*
     * save the mbuf and mapping info for a future packet: the freshly
     * loaded spare map becomes the slot's map, and the slot's old map
     * (or prev_index's, when they differ) becomes the new spare
     */
    map = (prev_index != index) ?
        fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
    rx_buf->m_map = fp->rx_mbuf_spare_map;
    fp->rx_mbuf_spare_map = map;
    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                    BUS_DMASYNC_PREREAD);
    rx_buf->m = m;

    /* publish the DMA address to the hardware RX BD */
    rx_bd = &fp->rx_chain[index];
    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));

    return (rc);
}

/*
 * Allocate and DMA-map a replacement mbuf for one TPA (aggregation)
 * queue slot, using the same spare-map swap scheme as the RX BD path.
 */
static int
bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                      int                 queue)
{
    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs;
    int rc = 0;

    /* allocate the new TPA mbuf */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_tpa++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /* map the mbuf into non-paged pool */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_tpa_info_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
        /* single mbuf from m_getjcl, so m_free (not m_freem) suffices */
        m_free(m);
        fp->eth_q_stats.mbuf_alloc_tpa--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    /* release any existing TPA mbuf mapping */
    if (tpa_info->bd.m_map != NULL) {
        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
    }

    /* save the mbuf and mapping info for the TPA mbuf */
    map = tpa_info->bd.m_map;
    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
    fp->rx_tpa_info_mbuf_spare_map = map;
    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
                    BUS_DMASYNC_PREREAD);
    tpa_info->bd.m = m;
    tpa_info->seg = segs[0];

    return (rc);
}

/*
 * Allocate an mbuf and assign it to the receive scatter gather chain. The
 * caller must take care to save a copy of the existing mbuf in the SG mbuf
 * chain.
*/ static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index) { struct bxe_sw_rx_bd *sge_buf; struct eth_rx_sge *sge; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate a new SGE mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; return (ENOMEM); } fp->eth_q_stats.mbuf_alloc_sge++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; /* map the SGE mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_sge--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); sge_buf = &fp->rx_sge_mbuf_chain[index]; /* release any existing SGE mbuf mapping */ if (sge_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = sge_buf->m_map; sge_buf->m_map = fp->rx_sge_mbuf_spare_map; fp->rx_sge_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_PREREAD); sge_buf->m = m; sge = &fp->rx_sge_chain[index]; sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static __noinline int bxe_alloc_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, j, rc = 0; int ring_prod, cqe_ring_prod; int max_agg_queues; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; ring_prod = cqe_ring_prod = 0; fp->rx_bd_cons = 0; fp->rx_cq_cons = 0; /* allocate buffers for the RX BDs in RX BD chain */ for (j = 0; j < sc->max_rx_bufs; j++) { rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 
if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", i, rc); goto bxe_alloc_fp_buffers_error; } ring_prod = RX_BD_NEXT(ring_prod); cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); } fp->rx_bd_prod = ring_prod; fp->rx_cq_prod = cqe_ring_prod; fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; max_agg_queues = MAX_AGG_QS(sc); fp->tpa_enable = TRUE; /* fill the TPA pool */ for (j = 0; j < max_agg_queues; j++) { rc = bxe_alloc_rx_tpa_mbuf(fp, j); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", i, j); fp->tpa_enable = FALSE; goto bxe_alloc_fp_buffers_error; } fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; } if (fp->tpa_enable) { /* fill the RX SGE chain */ ring_prod = 0; for (j = 0; j < RX_SGE_USABLE; j++) { rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", i, ring_prod); fp->tpa_enable = FALSE; ring_prod = 0; goto bxe_alloc_fp_buffers_error; } ring_prod = RX_SGE_NEXT(ring_prod); } fp->rx_sge_prod = ring_prod; } } return (0); bxe_alloc_fp_buffers_error: /* unwind what was already allocated */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); return (ENOBUFS); } static void bxe_free_fw_stats_mem(struct bxe_softc *sc) { bxe_dma_free(sc, &sc->fw_stats_dma); sc->fw_stats_num = 0; sc->fw_stats_req_size = 0; sc->fw_stats_req = NULL; sc->fw_stats_req_mapping = 0; sc->fw_stats_data_size = 0; sc->fw_stats_data = NULL; sc->fw_stats_data_mapping = 0; } static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc) { uint8_t num_queue_stats; int num_groups; /* number of queues for statistics is number of eth queues */ num_queue_stats = BXE_NUM_ETH_QUEUES(sc); /* * Total number of FW statistics requests = * 1 for port stats + 1 for PF stats + num of queues */ sc->fw_stats_num = (2 + num_queue_stats); /* * Request is built from stats_query_header and an array of * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT * rules. 
The real number or requests is configured in the * stats_query_header. */ num_groups = ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", sc->fw_stats_num, num_groups); sc->fw_stats_req_size = (sizeof(struct stats_query_header) + (num_groups * sizeof(struct stats_query_cmd_group))); /* * Data for statistics requests + stats_counter. * stats_counter holds per-STORM counters that are incremented when * STORM has finished with the current request. Memory for FCoE * offloaded statistics are counted anyway, even if they will not be sent. * VF stats are not accounted for here as the data of VF stats is stored * in memory allocated by the VF, not here. */ sc->fw_stats_data_size = (sizeof(struct stats_counter) + sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + /* sizeof(struct fcoe_statistics_params) + */ (sizeof(struct per_queue_stats) * num_queue_stats)); if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), &sc->fw_stats_dma, "fw stats") != 0) { bxe_free_fw_stats_mem(sc); return (-1); } /* set up the shortcuts */ sc->fw_stats_req = (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; sc->fw_stats_data = (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + sc->fw_stats_req_size); sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + sc->fw_stats_req_size); BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", (uintmax_t)sc->fw_stats_req_mapping); BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", (uintmax_t)sc->fw_stats_data_mapping); return (0); } /* * Bits map: * 0-7 - Engine0 load counter. * 8-15 - Engine1 load counter. * 16 - Engine0 RESET_IN_PROGRESS bit. * 17 - Engine1 RESET_IN_PROGRESS bit. * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active * function on the engine * 19 - Engine1 ONE_IS_LOADED. 
 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engine
 *      leaders to complete (check for both RESET_IN_PROGRESS bits and not
 *      for just the one belonging to its engine).
 */
#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
#define BXE_PATH0_LOAD_CNT_SHIFT  0
#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
#define BXE_PATH1_LOAD_CNT_SHIFT  8
#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
#define BXE_GLOBAL_RESET_BIT      0x00040000

/*
 * Set the GLOBAL_RESET bit in the shared recovery register.
 * Should be run under rtnl lock.  The read-modify-write is serialized
 * against the other engine via the HW recovery-register lock.
 */
static void
bxe_set_reset_global(struct bxe_softc *sc)
{
    uint32_t val;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the GLOBAL_RESET bit in the shared recovery register.
 * Should be run under rtnl lock; serialized by the HW lock as above.
 */
static void
bxe_clear_reset_global(struct bxe_softc *sc)
{
    uint32_t val;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Check the GLOBAL_RESET bit; should be run under rtnl lock.
 * Returns TRUE if a global (chip-wide) reset flow is in progress.
 * Read-only, so no HW lock is taken here.
 */
static uint8_t
bxe_reset_is_global(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);

    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
}

/*
 * Clear the RESET_IN_PROGRESS bit for this engine (path).
 * Should be run under rtnl lock.
 */
static void
bxe_set_reset_done(struct bxe_softc *sc)
{
    uint32_t val;
    /* pick the bit belonging to this PF's engine (path 0 or 1) */
    uint32_t bit = SC_PATH(sc) ?
        BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    /* Clear the bit */
    val &= ~bit;
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Set RESET_IN_PROGRESS for this engine (path); should be run under
 * rtnl lock.  Serialized against the other engine via the HW lock.
 */
static void
bxe_set_reset_in_progress(struct bxe_softc *sc)
{
    uint32_t val;
    /* pick the bit belonging to this PF's engine (path 0 or 1) */
    uint32_t bit = SC_PATH(sc) ?
        BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    /* Set the bit */
    val |= bit;
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Check the RESET_IN_PROGRESS bit for the given engine; should be run
 * under rtnl lock.  Returns TRUE when no reset is in progress on that
 * engine (i.e. the reset is "done").
 */
static uint8_t
bxe_reset_is_done(struct bxe_softc *sc,
                  int              engine)
{
    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
                            BXE_PATH0_RST_IN_PROG_BIT;

    /* return false if bit is set */
    return (val & bit) ? FALSE : TRUE;
}

/*
 * Get the load status for the given engine; should be run under rtnl
 * lock.  Returns non-zero if at least one PF is marked loaded on that
 * engine (per-PF bits live in the 8-bit load counter field).
 */
static uint8_t
bxe_get_load_status(struct bxe_softc *sc,
                    int              engine)
{
    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
                             BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
                              BXE_PATH0_LOAD_CNT_SHIFT;
    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);

    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);

    val = ((val & mask) >> shift);

    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);

    return (val != 0);
}

/*
 * Set the PF load mark: set this PF's bit inside the engine's load
 * counter field of the shared recovery register.
 */
/* XXX needs to be under rtnl lock */
static void
bxe_set_pf_load(struct bxe_softc *sc)
{
    uint32_t val;
    uint32_t val1;
    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                  BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ?
        BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);

    /* get the current counter value */
    val1 = ((val & mask) >> shift);
    /* set bit of this PF */
    val1 |= (1 << SC_ABS_FUNC(sc));
    /* clear the old value */
    val &= ~mask;
    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the PF load mark: clear this PF's bit inside the engine's load
 * counter field.  Returns non-zero if other PFs remain loaded on this
 * engine after the clear (i.e. we were not the last one out).
 */
/* XXX needs to be under rtnl lock */
static uint8_t
bxe_clear_pf_load(struct bxe_softc *sc)
{
    uint32_t val1, val;
    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                  BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
                                   BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);

    /* get the current counter value */
    val1 = (val & mask) >> shift;
    /* clear bit of that PF */
    val1 &= ~(1 << SC_ABS_FUNC(sc));
    /* clear the old value */
    val &= ~mask;
    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    return (val1 != 0);
}

/*
 * Send a load request to the MCP (management firmware) and return its
 * response in *load_code.  Returns 0 on success, -1 if the MCP fails
 * to respond or refuses the load.
 */
static int
bxe_nic_load_request(struct bxe_softc *sc,
                     uint32_t         *load_code)
{
    /* init fw_seq */
    sc->fw_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
         DRV_MSG_SEQ_NUMBER_MASK);

    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);

    /* get the current FW pulse sequence */
    sc->fw_drv_pulse_wr_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
         DRV_PULSE_SEQ_MASK);

    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
          sc->fw_drv_pulse_wr_seq);

    /* load request */
    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

    /* if the MCP fails to respond we must
       abort */
    if (!(*load_code)) {
        BLOGE(sc, "MCP response failure!\n");
        return (-1);
    }

    /* if MCP refused then must abort */
    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
        BLOGE(sc, "MCP refused load request\n");
        return (-1);
    }

    return (0);
}

/*
 * Check whether another PF has already loaded FW to the chip.  In
 * virtualized environments a PF from another VM may have already
 * initialized the device, including loading FW.  Returns 0 if it is
 * safe to proceed, -1 on FW version mismatch with the already-loaded
 * image.
 */
static int
bxe_nic_load_analyze_req(struct bxe_softc *sc,
                         uint32_t         load_code)
{
    uint32_t my_fw, loaded_fw;

    /* is another pf loaded on this engine? */
    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
        /* build my FW version dword */
        my_fw = (BCM_5710_FW_MAJOR_VERSION +
                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
                 (BCM_5710_FW_REVISION_VERSION << 16) +
                 (BCM_5710_FW_ENGINEERING_VERSION << 24));

        /* read loaded FW from chip */
        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);

        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
              loaded_fw, my_fw);

        /* abort nic load if version mismatch */
        if (my_fw != loaded_fw) {
            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
                  loaded_fw, my_fw);
            return (-1);
        }
    }

    return (0);
}

/*
 * Mark this function as PMF (port management function) if the MCP's
 * load response says we are the first driver up on this port/chip.
 */
static void
bxe_nic_load_pmf(struct bxe_softc *sc,
                 uint32_t         load_code)
{
    uint32_t ncsi_oem_data_addr;

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
        /*
         * Barrier here for ordering between the writing to sc->port.pmf here
         * and reading it from the periodic task.
         */
        sc->port.pmf = 1;
        mb();
    } else {
        sc->port.pmf = 0;
    }

    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);

    /* XXX needed?
*/ if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); if (ncsi_oem_data_addr) { REG_WR(sc, (ncsi_oem_data_addr + offsetof(struct glob_ncsi_oem_data, driver_version)), 0); } } } } static void bxe_read_mf_cfg(struct bxe_softc *sc) { int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); int abs_func; int vn; if (BXE_NOMCP(sc)) { return; /* what should be the default bvalue in this case */ } /* * The formula for computing the absolute function number is... * For 2 port configuration (4 functions per port): * abs_func = 2 * vn + SC_PORT + SC_PATH * For 4 port configuration (2 functions per port): * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH */ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); if (abs_func >= E1H_FUNC_MAX) { break; } sc->devinfo.mf_info.mf_config[vn] = MFCFG_RD(sc, func_mf_config[abs_func].config); } if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; } else { BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; } } /* acquire split MCP access lock register */ static int bxe_acquire_alr(struct bxe_softc *sc) { uint32_t j, val; for (j = 0; j < 1000; j++) { val = (1UL << 31); REG_WR(sc, GRCBASE_MCP + 0x9c, val); val = REG_RD(sc, GRCBASE_MCP + 0x9c); if (val & (1L << 31)) break; DELAY(5000); } if (!(val & (1L << 31))) { BLOGE(sc, "Cannot acquire MCP access lock register\n"); return (-1); } return (0); } /* release split MCP access lock register */ static void bxe_release_alr(struct bxe_softc *sc) { REG_WR(sc, GRCBASE_MCP + 0x9c, 0); } static void bxe_fan_failure(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t ext_phy_config; /* mark the failure */ ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); ext_phy_config &= 
~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, ext_phy_config); /* log the failure */ BLOGW(sc, "Fan Failure has caused the driver to shutdown " "the card to prevent permanent damage. " "Please contact OEM Support for assistance\n"); /* XXX */ #if 1 bxe_panic(sc, ("Schedule task to handle fan failure\n")); #else /* * Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); schedule_delayed_work(&sc->sp_rtnl_task, 0); #endif } /* this function is called upon a link interrupt */ static void bxe_link_attn(struct bxe_softc *sc) { uint32_t pause_enabled = 0; struct host_port_stats *pstats; int cmng_fns; struct bxe_fastpath *fp; int i; /* Make sure that we are synced with the current statistics */ bxe_stats_handle(sc, STATS_EVENT_STOP); BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags); elink_link_update(&sc->link_params, &sc->link_vars); if (sc->link_vars.link_up) { /* dropless flow control */ if (!CHIP_IS_E1(sc) && sc->dropless_fc) { pause_enabled = 0; if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { pause_enabled = 1; } REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), pause_enabled); } if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { pstats = BXE_SP(sc, port_stats); /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } if (sc->state == BXE_STATE_OPEN) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); /* Restart tx when the link comes back. 
*/ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; taskqueue_enqueue(fp->tq, &fp->tx_task); } } } if (sc->link_vars.link_up && sc->link_vars.line_speed) { cmng_fns = bxe_get_cmng_fns_mode(sc); if (cmng_fns != CMNG_FNS_NONE) { bxe_cmng_fns_init(sc, FALSE, cmng_fns); storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } else { /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); } } bxe_link_report_locked(sc); if (IS_MF(sc)) { ; // XXX bxe_link_sync_notify(sc); } } static void bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted) { int port = SC_PORT(sc); uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0; uint32_t aeu_mask; uint32_t nig_mask = 0; uint32_t reg_addr; uint32_t igu_acked; uint32_t cnt; if (sc->attn_state & asserted) { BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, aeu_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", aeu_mask, asserted); aeu_mask &= ~(asserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, aeu_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state |= asserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); if (asserted & ATTN_HARD_WIRED_MASK) { if (asserted & ATTN_NIG_FOR_FUNC) { bxe_acquire_phy_lock(sc); /* save nig interrupt mask */ nig_mask = REG_RD(sc, nig_int_mask_addr); /* If nig_mask is not set, no need to call the update function */ if (nig_mask) { REG_WR(sc, nig_int_mask_addr, 0); bxe_link_attn(sc); } /* handle unicore attn? 
*/ } if (asserted & ATTN_SW_TIMER_4_FUNC) { BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); } if (asserted & GPIO_2_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); } if (asserted & GPIO_3_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); } if (asserted & GPIO_4_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); } if (port == 0) { if (asserted & ATTN_GENERAL_ATTN_1) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); } if (asserted & ATTN_GENERAL_ATTN_2) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); } if (asserted & ATTN_GENERAL_ATTN_3) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); } } else { if (asserted & ATTN_GENERAL_ATTN_4) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); } if (asserted & ATTN_GENERAL_ATTN_5) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); } if (asserted & ATTN_GENERAL_ATTN_6) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); } } } /* hardwired */ if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); } BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", asserted, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, asserted); /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) { /* * Verify that IGU ack through BAR was written before restoring * NIG mask. This loop should exit after 2-3 iterations max. 
*/ if (sc->devinfo.int_block != INT_BLOCK_HC) { cnt = 0; do { igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && (++cnt < MAX_IGU_ATTN_ACK_TO)); if (!igu_acked) { BLOGE(sc, "Failed to verify IGU ack on time\n"); } mb(); } REG_WR(sc, nig_int_mask_addr, nig_mask); bxe_release_phy_lock(sc); } } static void bxe_print_next_block(struct bxe_softc *sc, int idx, const char *blk) { BLOGI(sc, "%s%s", idx ? ", " : "", blk); } static int bxe_check_blocks_with_parity0(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "BRB"); break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PARSER"); break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSDM"); break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "SEARCHER"); break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TCM"); break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSEMI"); break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XPB"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity1(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { int i = 0; uint32_t cur_bit = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PBF"); break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "QM"); 
break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TM"); break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSDM"); break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XCM"); break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSEMI"); break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DOORBELLQ"); break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "NIG"); break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DEBUG"); break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USDM"); break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UCM"); break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USEMI"); break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UPB"); break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSDM"); break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CCM"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity2(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSEMI"); break; case 
AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PXP"); break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CFC"); break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CDU"); break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DMAE"); break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "IGU"); break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "MISC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity3(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP ROM"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP RX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP TX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP SCPAD"); *global = TRUE; break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity4(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: if (print) 
bxe_print_next_block(sc, par_num++, "PGLUE_B"); break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "ATC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static uint8_t bxe_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print, uint32_t *sig) { int par_num = 0; if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { BLOGE(sc, "Parity error: HW block parity attention:\n" "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); if (print) BLOGI(sc, "Parity errors detected in blocks: "); par_num = bxe_check_blocks_with_parity0(sc, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); par_num = bxe_check_blocks_with_parity1(sc, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); par_num = bxe_check_blocks_with_parity2(sc, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bxe_check_blocks_with_parity3(sc, sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); par_num = bxe_check_blocks_with_parity4(sc, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) BLOGI(sc, "\n"); if( *global == TRUE ) { BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); } return (TRUE); } return (FALSE); } static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print) { struct attn_route attn = { {0} }; int port = SC_PORT(sc); if(sc->state != BXE_STATE_OPEN) return FALSE; attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); /* 
* Since MCP attentions can't be disabled inside the block, we need to * read AEU registers to see whether they're currently disabled */ attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & MISC_AEU_ENABLE_MCP_PRTY_BITS) | ~MISC_AEU_ENABLE_MCP_PRTY_BITS); if (!CHIP_IS_E1x(sc)) attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); return (bxe_parity_attn(sc, global, print, attn.sig)); } static void bxe_attn_int_deasserted4(struct bxe_softc *sc, uint32_t attn) { uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); err_flg = TRUE; if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); } if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); BLOGE(sc, "ATC hw 
attention 0x%08x\n", val); err_flg = TRUE; if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); } if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { BLOGE(sc, "FATAL parity attention set4 0x%08x\n", (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); err_flg = TRUE; } if (err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_e1h_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); bxe_tx_disable(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } static void bxe_e1h_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); // XXX bxe_tx_enable(sc); } /* * called due to MCP event (on pmf): * reread new bandwidth configuration * configure FW * notify others function about the change */ static void bxe_config_mf_bw(struct bxe_softc *sc) { if (sc->link_vars.link_up) { bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); // XXX bxe_link_sync_notify(sc); } storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } static void bxe_set_mf_bw(struct bxe_softc *sc) { bxe_config_mf_bw(sc); bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } static void bxe_handle_eee_event(struct bxe_softc *sc) { BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 
0); } #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 static void bxe_drv_info_ether_stat(struct bxe_softc *sc) { struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; strlcpy(ether_stat->version, BXE_DRIVER_VERSION, ETH_STAT_INFO_VERSION_LEN); /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, ether_stat->mac_local + MAC_PAD, MAC_PAD, ETH_ALEN); ether_stat->mtu_size = sc->mtu; ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; } // XXX ether_stat->feature_flags |= ???; ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; ether_stat->txq_size = sc->tx_ring_size; ether_stat->rxq_size = sc->rx_ring_size; } static void bxe_handle_drv_info_req(struct bxe_softc *sc) { enum drv_info_opcode op_code; uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT); memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); switch (op_code) { case ETH_STATS_OPCODE: bxe_drv_info_ether_stat(sc); break; case FCOE_STATS_OPCODE: case ISCSI_STATS_OPCODE: default: /* if op code isn't supported - send NACK */ bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } /* * If we got drv_info attn from MFW then these fields are defined in * shmem2 for sure */ SHMEM2_WR(sc, drv_info_host_addr_lo, U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); SHMEM2_WR(sc, drv_info_host_addr_hi, U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); } static void bxe_dcc_event(struct bxe_softc *sc, 
uint32_t dcc_event) { BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { /* * This is the only place besides the function initialization * where the sc->flags can change so it is done without any * locks */ if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; bxe_e1h_disable(sc); } else { BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; bxe_e1h_enable(sc); } dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; } if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { bxe_config_mf_bw(sc); dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; } /* Report results to MCP */ if (dcc_event) bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); else bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); } static void bxe_pmf_update(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; sc->port.pmf = 1; BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); /* * We need the mb() to ensure the ordering between the writing to * sc->port.pmf here and reading it from the bxe_periodic_task(). */ mb(); /* queue a periodic task */ // XXX schedule task... 
// XXX bxe_dcbx_pmf_update(sc); /* enable nig attention */ val = (0xff0f | (1 << (SC_VN(sc) + 4))); if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); } bxe_stats_handle(sc, STATS_EVENT_PMF); } static int bxe_mc_assert(struct bxe_softc *sc) { char last_idx; int i, rc = 0; uint32_t row0, row1, row2, row3; /* XSTORM */ last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* TSTORM */ last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* CSTORM */ last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + 
CSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* USTORM */ last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } return (rc); } static void bxe_attn_int_deasserted3(struct bxe_softc *sc, uint32_t attn) { int func = SC_FUNC(sc); uint32_t val; if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { if (attn & BXE_PMF_LINK_ASSERT(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bxe_read_mf_cfg(sc); sc->devinfo.mf_info.mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); if (val & DRV_STATUS_DCC_EVENT_MASK) bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); if (val & DRV_STATUS_SET_MF_BW) bxe_set_mf_bw(sc); if (val & DRV_STATUS_DRV_INFO_REQ) bxe_handle_drv_info_req(sc); if 
((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) bxe_pmf_update(sc); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bxe_handle_eee_event(sc); if (sc->link_vars.periodic_flags & ELINK_PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ bxe_acquire_phy_lock(sc); sc->link_vars.periodic_flags &= ~ELINK_PERIODIC_FLAGS_LINK_EVENT; bxe_release_phy_lock(sc); if (IS_MF(sc)) ; // XXX bxe_link_sync_notify(sc); bxe_link_report(sc); } /* * Always call it here: bxe_link_report() will * prevent the link indication duplication. */ bxe_link_status_update(sc); } else if (attn & BXE_MC_ASSERT_BITS) { BLOGE(sc, "MC assert!\n"); bxe_mc_assert(sc); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); bxe_int_disable(sc); BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } else if (attn & BXE_MCP_ASSERT) { BLOGE(sc, "MCP assert!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_int_disable(sc); /*avoid repetive assert alert */ } else { BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); } } if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); if (attn & BXE_GRC_TIMEOUT) { val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); BLOGE(sc, "GRC time-out 0x%08x\n", val); } if (attn & BXE_GRC_RSV) { val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); BLOGE(sc, "GRC reserved 0x%08x\n", val); } REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); } } static void bxe_attn_int_deasserted2(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val0, mask0, val1, mask1; uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); BLOGE(sc, "CFC hw attention 0x%08x\n", val); /* CFC error attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from CFC\n"); err_flg = TRUE; } } if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); /* RQ_USDMDP_FIFO_OVERFLOW */ if (val & 0x18000) { BLOGE(sc, "FATAL error from PXP\n"); err_flg = TRUE; } if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); err_flg = TRUE; } } #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT if (attn & AEU_PXP2_HW_INT_BIT) { /* CQ47854 workaround do not panic on * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR */ if (!CHIP_IS_E1x(sc)) { mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); /* * If the only PXP2_EOP_ERROR_BIT is set in * STS0 and STS1 - clear it * * probably we lose additional attentions between * STS0 and STS_CLR0, in this case user will not * be notified about them */ if (val0 & mask0 & PXP2_EOP_ERROR_BIT && !(val1 & mask1)) val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); /* print the register, since no one can restore it */ BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); /* * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR * then notify */ if (val0 & PXP2_EOP_ERROR_BIT) { BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); err_flg = 
TRUE; /* * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is * set then clear attention from PXP2 block without panic */ if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && ((val1 & mask1) == 0)) attn &= ~AEU_PXP2_HW_INT_BIT; } } } if (attn & HW_INTERRUT_ASSERT_SET_2) { reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set2 0x%x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); err_flg = TRUE; bxe_panic(sc, ("HW block attention set2\n")); } if(err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_attn_int_deasserted1(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); BLOGE(sc, "DB hw attention 0x%08x\n", val); /* DORQ discard attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from DORQ\n"); err_flg = TRUE; } } if (attn & HW_INTERRUT_ASSERT_SET_1) { reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); err_flg = TRUE; bxe_panic(sc, ("HW block attention set1\n")); } if(err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_attn_int_deasserted0(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; reg_offset = (port) ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { val = REG_RD(sc, reg_offset); val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_offset, val); BLOGW(sc, "SPIO5 hw attention\n"); /* Fan failure attention */ elink_hw_reset_phy(&sc->link_params); bxe_fan_failure(sc); } if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_handle_module_detect_int(&sc->link_params); bxe_release_phy_lock(sc); } if (attn & HW_INTERRUT_ASSERT_SET_0) { val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); REG_WR(sc, reg_offset, val); BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", (attn & HW_INTERRUT_ASSERT_SET_0))); } } static void bxe_attn_int_deasserted(struct bxe_softc *sc, uint32_t deasserted) { struct attn_route attn; struct attn_route *group_mask; int port = SC_PORT(sc); int index; uint32_t reg_addr; uint32_t val; uint32_t aeu_mask; uint8_t global = FALSE; /* * Need to take HW lock because MCP or other port might also * try to handle this event. */ bxe_acquire_alr(sc); if (bxe_chk_parity_attn(sc, &global, TRUE)) { /* XXX * In case of parity errors don't handle attentions so that * other function would "see" parity errors. */ // XXX schedule a recovery task... 
/* disable HW interrupts */ bxe_int_disable(sc); BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_release_alr(sc); return; } attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); if (!CHIP_IS_E1x(sc)) { attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); } else { attn.sig[4] = 0; } BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { if (deasserted & (1 << index)) { group_mask = &sc->attn_group[index]; BLOGD(sc, DBG_INTR, "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], group_mask->sig[4]); bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); } } bxe_release_alr(sc); if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); } val = ~deasserted; BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", val, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, val); if (~sc->attn_state & deasserted) { BLOGE(sc, "IGU error\n"); } reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, reg_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", aeu_mask, deasserted); aeu_mask |= (deasserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, reg_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state &= ~deasserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); } static void bxe_attn_int(struct bxe_softc *sc) { /* read local copy of bits */ uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); uint32_t attn_state = sc->attn_state; /* look for changed bits */ uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; uint32_t deasserted = ~attn_bits & attn_ack & attn_state; BLOGD(sc, DBG_INTR, "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", attn_bits, attn_ack, asserted, deasserted); if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { BLOGE(sc, "BAD attention state\n"); } /* handle bits that were raised */ if (asserted) { bxe_attn_int_asserted(sc, asserted); } if (deasserted) { bxe_attn_int_deasserted(sc, deasserted); } } static uint16_t bxe_update_dsb_idx(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; uint16_t rc = 0; mb(); /* status block is written to by the chip */ if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= BXE_DEF_SB_ATT_IDX; } if (sc->def_idx != def_sb->sp_sb.running_index) { sc->def_idx = def_sb->sp_sb.running_index; rc |= BXE_DEF_SB_IDX; } mb(); return (rc); } static inline struct ecore_queue_sp_obj * bxe_cid_to_q_obj(struct bxe_softc *sc, uint32_t cid) { BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", 
cid);
    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
}

/*
 * Complete the pending multicast configuration command and, if more
 * mcast commands are queued on the object, issue the next one
 * (ECORE_MCAST_CMD_CONT).  Runs under the mcast lock.
 */
static void
bxe_handle_mcast_eqe(struct bxe_softc *sc)
{
    struct ecore_mcast_ramrod_params rparam;
    int rc;

    memset(&rparam, 0, sizeof(rparam));

    rparam.mcast_obj = &sc->mcast_obj;

    BXE_MCAST_LOCK(sc);

    /* clear pending state for the last command */
    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);

    /* if there are pending mcast commands - send them */
    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
        if (rc < 0) {
            BLOGD(sc, DBG_SP,
                  "ERROR: Failed to send pending mcast commands (%d)\n",
                  rc);
        }
    }

    BXE_MCAST_UNLOCK(sc);
}

/*
 * Dispatch a classification (MAC / mcast rules) EQ completion to the
 * object that issued it, based on the command type encoded in the
 * upper bits of the 'echo' field.
 */
static void
bxe_handle_classification_eqe(struct bxe_softc      *sc,
                              union event_ring_elem *elem)
{
    unsigned long ramrod_flags = 0;
    int rc = 0;
    /*
     * NOTE(review): 'cid' is masked out of the raw 'echo' field while the
     * switch below byte-swaps it first with le32toh() -- on a big-endian
     * host these disagree; confirm which form is intended.
     */
    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
    struct ecore_vlan_mac_obj *vlan_mac_obj;

    /* always push next commands out, don't wait here */
    bit_set(&ramrod_flags, RAMROD_CONT);

    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
        /*
         * This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
*/ bxe_handle_mcast_eqe(sc); return; default: BLOGE(sc, "Unsupported classification command: %d\n", elem->message.data.eth_event.echo); return; } rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); } else if (rc > 0) { BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); } } static void bxe_handle_rx_mode_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); /* send rx_mode command again if was requested */ if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { bxe_set_storm_rx_mode(sc); } } static void bxe_update_eq_prod(struct bxe_softc *sc, uint16_t prod) { storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); wmb(); /* keep prod updates ordered */ } static void bxe_eq_int(struct bxe_softc *sc) { uint16_t hw_cons, sw_cons, sw_prod; union event_ring_elem *elem; uint8_t echo; uint32_t cid; uint8_t opcode; int spqe_cnt = 0; struct ecore_queue_sp_obj *q_obj; struct ecore_func_sp_obj *f_obj = &sc->func_obj; struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; hw_cons = le16toh(*sc->eq_cons_sb); /* * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. * when we get to the next-page we need to adjust so the loop * condition below will be met. The next element is the size of a * regular element and hence incrementing by 1 */ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { hw_cons++; } /* * This function may never run in parallel with itself for a * specific sc and no need for a read memory barrier here. 
*/ sw_cons = sc->eq_cons; sw_prod = sc->eq_prod; BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { elem = &sc->eq[EQ_DESC(sw_cons)]; /* elem CID originates from FW, actually LE */ cid = SW_CID(elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: BLOGD(sc, DBG_SP, "got statistics completion event %d\n", sc->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; case EVENT_RING_OPCODE_CFC_DEL: /* handle according to cid range */ /* we may want to verify here that the sc state is HALTING */ BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); q_obj = bxe_cid_to_q_obj(sc, cid); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { break; } goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: echo = elem->message.data.function_update_event.echo; if (echo == SWITCH_UPDATE) { BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_SWITCH_UPDATE)) { break; } } else { BLOGD(sc, DBG_SP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); } goto next_spqe; case EVENT_RING_OPCODE_FORWARD_SETUP: q_obj = &bxe_fwd_sp_obj(sc, q_obj); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_SETUP_TX_ONLY)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); if 
(f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { break; } goto next_spqe; } switch (opcode | sc->state) { case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); rss_raw->clear_pending(rss_raw); break; case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); bxe_handle_classification_eqe(sc, elem); break; case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got mcast ramrod\n"); bxe_handle_mcast_eqe(sc); break; case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); bxe_handle_rx_mode_eqe(sc, elem); break; default: /* unknown event log error and continue */ BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", elem->message.opcode, sc->state); } next_spqe: spqe_cnt++; } /* for */ mb(); atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); sc->eq_cons = sw_cons; sc->eq_prod = sw_prod; /* make sure that above mem writes were issued towards the memory */ wmb(); /* update producer */ bxe_update_eq_prod(sc, sc->eq_prod); } static 
void bxe_handle_sp_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; uint16_t status; BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); /* what work needs to be performed? */ status = bxe_update_dsb_idx(sc); BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); /* HW attentions */ if (status & BXE_DEF_SB_ATT_IDX) { BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); bxe_attn_int(sc); status &= ~BXE_DEF_SB_ATT_IDX; } /* SP events: STAT_QUERY and others */ if (status & BXE_DEF_SB_IDX) { /* handle EQ completions */ BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); bxe_eq_int(sc); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, le16toh(sc->def_idx), IGU_INT_NOP, 1); status &= ~BXE_DEF_SB_IDX; } /* if status is non zero then something went wrong */ if (__predict_false(status)) { BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); } /* ack status block only if something was actually handled */ bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); /* * Must be called after the EQ processing (since eq leads to sriov * ramrod completion flows). * This flow may have been scheduled by the arrival of a ramrod * completion, or by the sriov code rescheduling itself. */ // XXX bxe_iov_sp_task(sc); } static void bxe_handle_fp_tq(void *context, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)context; struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); /* XXX * IFF_DRV_RUNNING state can't be checked here since we process * slowpath events on a client queue during setup. Instead * we need to add a "process/continue" flag here that the driver * can use to tell the task here not to do anything. 
*/
#if 0
    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
        return;
    }
#endif

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add loop here if ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        more_tx = bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        more_rx = bxe_rxeof(sc, fp);
    }

    if (more_rx /*|| more_tx*/) {
        /* still more work to do */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    /* all pending work handled: re-arm this fastpath status block */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}

/*
 * Fastpath work driven directly from the ISR: process pending TX/RX
 * completions and re-enable the fastpath interrupt, or requeue the
 * taskqueue handler if more work remains.
 *
 * NOTE(review): this duplicates the body of bxe_handle_fp_tq() above;
 * consider folding the shared logic into a single helper.
 */
static void
bxe_task_fp(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc = fp->sc;
    uint8_t more_tx = FALSE;
    uint8_t more_rx = FALSE;

    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);

    /* update the fastpath index */
    bxe_update_fp_sb_idx(fp);

    /* XXX add loop here if ever support multiple tx CoS */
    /* fp->txdata[cos] */
    if (bxe_has_tx_work(fp)) {
        BXE_FP_TX_LOCK(fp);
        more_tx = bxe_txeof(sc, fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    if (bxe_has_rx_work(fp)) {
        more_rx = bxe_rxeof(sc, fp);
    }

    if (more_rx /*|| more_tx*/) {
        /* still more work to do, bail out if this ISR and process later */
        taskqueue_enqueue(fp->tq, &fp->tq_task);
        return;
    }

    /*
     * Here we write the fastpath index taken before doing any tx or rx work.
     * It is very well possible other hw events occurred up to this point and
     * they were actually processed accordingly above. Since we're going to
     * write an older fastpath index, an interrupt is coming which we might
     * not do any work in.
     */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}

/*
 * Legacy interrupt entry point.
 *
 * Verifies that the controller generated the interrupt and
 * then calls a separate routine to handle the various
 * interrupt causes: link, RX, and TX.
*/ static void bxe_intr_legacy(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; struct bxe_fastpath *fp; uint16_t status, mask; int i; BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); /* * 0 for ustorm, 1 for cstorm * the bits returned from ack_int() are 0-15 * bit 0 = attention status block * bit 1 = fast path status block * a mask of 0x2 or more = tx/rx event * a mask of 1 = slow path event */ status = bxe_ack_int(sc); /* the interrupt is not for us */ if (__predict_false(status == 0)) { BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); return; } BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); if (status & mask) { /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); status &= ~mask; } } if (__predict_false(status & 0x1)) { /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); status &= ~0x1; } if (__predict_false(status)) { BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); } } /* slowpath interrupt entry point */ static void bxe_intr_sp(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); } /* fastpath interrupt entry point */ static void bxe_intr_fp(void *xfp) { struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; struct bxe_softc *sc = fp->sc; BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); BLOGD(sc, DBG_INTR, "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); /* acknowledge and disable further 
fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); } /* Release all interrupts allocated by the driver. */ static void bxe_interrupt_free(struct bxe_softc *sc) { int i; switch (sc->interrupt_mode) { case INTR_MODE_INTX: BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); if (sc->intr[0].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[0].rid, sc->intr[0].resource); } break; case INTR_MODE_MSI: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; case INTR_MODE_MSIX: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; default: /* nothing to do as initial allocation failed */ break; } } /* * This function determines and allocates the appropriate * interrupt based on system capabilites and user request. * * The user may force a particular interrupt mode, specify * the number of receive queues, specify the method for * distribuitng received frames to receive queues, or use * the default settings which will automatically select the * best supported combination. In addition, the OS may or * may not support certain combinations of these settings. * This routine attempts to reconcile the settings requested * by the user with the capabilites available from the system * to select the optimal combination of features. * * Returns: * 0 = Success, !0 = Failure. 
*/ static int bxe_interrupt_alloc(struct bxe_softc *sc) { int msix_count = 0; int msi_count = 0; int num_requested = 0; int num_allocated = 0; int rid, i, j; int rc; /* get the number of available MSI/MSI-X interrupts from the OS */ if (sc->interrupt_mode > 0) { if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { msix_count = pci_msix_count(sc->dev); } if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { msi_count = pci_msi_count(sc->dev); } BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", msi_count, msix_count); } do { /* try allocating MSI-X interrupt resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSIX) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || (msix_count < 2)) { sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } /* ask for the necessary number of MSI-X vectors */ num_requested = min((sc->num_queues + 1), msix_count); BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } if (num_allocated < 2) { /* possible? 
*/ BLOGE(sc, "MSI-X allocation less than 2!\n"); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated - 1; rid = 1; /* initial resource identifier */ /* allocate the MSI-X vectors */ for (i = 0; i < num_allocated; i++) { sc->intr[i].rid = (rid + i); if ((sc->intr[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[i].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", i, (rid + i)); for (j = (i - 1); j >= 0; j--) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[j].rid, sc->intr[j].resource); } sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); } } while (0); do { /* try allocating MSI vector resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSI) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || (msi_count < 1)) { sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } /* ask for a single MSI vector */ num_requested = 1; BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI alloc failed (%d)!\n", rc); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } if (num_allocated != 1) { /* possible? 
*/ BLOGE(sc, "MSI allocation is not 1!\n"); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated; rid = 1; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); } while (0); do { /* try allocating INTx vector resources */ if (sc->interrupt_mode != INTR_MODE_INTX) { break; } BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); /* only one vector for INTx */ sc->intr_count = 1; sc->num_queues = 1; rid = 0; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, (RF_ACTIVE | RF_SHAREABLE))) == NULL) { BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = -1; /* Failed! 
*/ break; } BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); } while (0); if (sc->interrupt_mode == -1) { BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); rc = 1; } else { BLOGD(sc, DBG_LOAD, "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", sc->interrupt_mode, sc->num_queues); rc = 0; } return (rc); } static void bxe_interrupt_detach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; /* release interrupt resources */ for (i = 0; i < sc->intr_count; i++) { if (sc->intr[i].resource && sc->intr[i].tag) { BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); } } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tq) { taskqueue_drain(fp->tq, &fp->tq_task); taskqueue_drain(fp->tq, &fp->tx_task); while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task, NULL)) taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task); } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tq != NULL) { taskqueue_free(fp->tq); fp->tq = NULL; } } } if (sc->sp_tq) { taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); taskqueue_free(sc->sp_tq); sc->sp_tq = NULL; } } /* * Enables interrupts and attach to the ISR. * * When using multiple MSI/MSI-X vectors the first vector * is used for slowpath operations while all remaining * vectors are used for fastpath operations. If only a * single MSI/MSI-X vector is used (SINGLE_ISR) then the * ISR must look for both slowpath and fastpath completions. 
 */
static int
bxe_interrupt_attach(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int rc = 0;
    int i;

    /* slowpath taskqueue, one thread at lower priority (PWAIT) */
    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
             "bxe%d_sp_tq", sc->unit);
    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
                                 taskqueue_thread_enqueue,
                                 &sc->sp_tq);
    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
                            "%s", sc->sp_tq_name);

    /* one taskqueue per fastpath, at network priority (PI_NET) */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        snprintf(fp->tq_name, sizeof(fp->tq_name),
                 "bxe%d_fp%d_tq", sc->unit, i);
        NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
                                  taskqueue_thread_enqueue,
                                  &fp->tq);
        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
                          bxe_tx_mq_start_deferred, fp);
        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
                                "%s", fp->tq_name);
    }

    /* setup interrupt handlers */
    if (sc->interrupt_mode == INTR_MODE_MSIX) {
        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");

        /*
         * Setup the interrupt handler. Note that we pass the driver instance
         * to the interrupt handler for the slowpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_sp, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }

        bus_describe_intr(sc->dev, sc->intr[0].resource,
                          sc->intr[0].tag, "sp");

        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */

        /* initialize the fastpath vectors (note the first was used for sp) */
        for (i = 0; i < sc->num_queues; i++) {
            fp = &sc->fp[i];
            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));

            /*
             * Setup the interrupt handler. Note that we pass the
             * fastpath context to the interrupt handler in this
             * case.
             */
            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
                                     (INTR_TYPE_NET | INTR_MPSAFE),
                                     NULL, bxe_intr_fp, fp,
                                     &sc->intr[i + 1].tag)) != 0) {
                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
                      (i + 1), rc);
                goto bxe_interrupt_attach_exit;
            }

            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
                              sc->intr[i + 1].tag, "fp%02d", i);

            /* bind the fastpath instance to a cpu */
            if (sc->num_queues > 1) {
                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
            }

            fp->state = BXE_FP_STATE_IRQ;
        }
    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");

        /*
         * Setup the interrupt handler. Note that we pass the
         * driver instance to the interrupt handler which
         * will handle both the slowpath and fastpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_legacy, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }
    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");

        /*
         * Setup the interrupt handler. Note that we pass the
         * driver instance to the interrupt handler which
         * will handle both the slowpath and fastpath.
         */
        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
                                 (INTR_TYPE_NET | INTR_MPSAFE),
                                 NULL, bxe_intr_legacy, sc,
                                 &sc->intr[0].tag)) != 0) {
            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
            goto bxe_interrupt_attach_exit;
        }
    }

bxe_interrupt_attach_exit:

    return (rc);
}

static int bxe_init_hw_common_chip(struct bxe_softc *sc);
static int bxe_init_hw_common(struct bxe_softc *sc);
static int bxe_init_hw_port(struct bxe_softc *sc);
static int bxe_init_hw_func(struct bxe_softc *sc);
static void bxe_reset_common(struct bxe_softc *sc);
static void bxe_reset_port(struct bxe_softc *sc);
static void bxe_reset_func(struct bxe_softc *sc);
static int bxe_gunzip_init(struct bxe_softc *sc);
static void bxe_gunzip_end(struct bxe_softc *sc);
static int bxe_init_firmware(struct bxe_softc *sc);
static void bxe_release_firmware(struct bxe_softc *sc);

/* driver-side HW init/reset/firmware callbacks used by the ecore layer */
static struct ecore_func_sp_drv_ops bxe_func_sp_drv = {
    .init_hw_cmn_chip = bxe_init_hw_common_chip,
    .init_hw_cmn      = bxe_init_hw_common,
    .init_hw_port     = bxe_init_hw_port,
    .init_hw_func     = bxe_init_hw_func,
    .reset_hw_cmn     = bxe_reset_common,
    .reset_hw_port    = bxe_reset_port,
    .reset_hw_func    = bxe_reset_func,
    .gunzip_init      = bxe_gunzip_init,
    .gunzip_end       = bxe_gunzip_end,
    .init_fw          = bxe_init_firmware,
    .release_fw       = bxe_release_firmware,
};

/* Bind the function state object (and the driver callbacks above) to ecore. */
static void
bxe_init_func_obj(struct bxe_softc *sc)
{
    sc->dmae_ready = 0;

    ecore_init_func_obj(sc,
                        &sc->func_obj,
                        BXE_SP(sc, func_rdata),
                        BXE_SP_MAPPING(sc, func_rdata),
                        BXE_SP(sc, func_afex_rdata),
                        BXE_SP_MAPPING(sc, func_afex_rdata),
                        &bxe_func_sp_drv);
}

/* Run the ECORE_F_CMD_HW_INIT state transition for the given load phase. */
static int
bxe_init_hw(struct bxe_softc *sc,
            uint32_t load_code)
{
    struct ecore_func_state_params func_params = { NULL };
    int rc;

    /* prepare the parameters for function state transitions */
    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);

    func_params.f_obj = &sc->func_obj;
    func_params.cmd = ECORE_F_CMD_HW_INIT;

    func_params.params.hw_init.load_phase = load_code;

    /*
     * Via a plethora of function pointers, we will eventually reach
     *
     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
     */
    rc = ecore_func_state_change(sc, &func_params);

    return (rc);
}

/*
 * Fill 'len' bytes of device memory at 'addr' with 'fill'; uses dword
 * writes when both addr and len are 4-byte aligned, byte writes otherwise.
 */
static void
bxe_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len)
{
    uint32_t i;

    if (!(len % 4) && !(addr % 4)) {
        for (i = 0; i < len; i += 4) {
            REG_WR(sc, (addr + i), fill);
        }
    } else {
        for (i = 0; i < len; i++) {
            REG_WR8(sc, (addr + i), fill);
        }
    }
}

/* writes FP SP data to FW - data_size in dwords */
static void
bxe_wr_fp_sb_data(struct bxe_softc *sc,
                  int fw_sb_id,
                  uint32_t *sb_data_p,
                  uint32_t data_size)
{
    int index;

    for (index = 0; index < data_size; index++) {
        REG_WR(sc,
               (BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                (sizeof(uint32_t) * index)),
               *(sb_data_p + index));
    }
}

/* Mark fastpath status block 'fw_sb_id' disabled and zero its FW memory. */
static void
bxe_zero_fp_sb(struct bxe_softc *sc, int fw_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    uint32_t *sb_data_p;
    uint32_t data_size = 0;

    /* E2+ and E1x use different status block layouts */
    if (!CHIP_IS_E1x(sc)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_DISABLED;
        sb_data_e2.common.p_func.vf_valid = FALSE;
        sb_data_p = (uint32_t *)&sb_data_e2;
        data_size = (sizeof(struct hc_status_block_data_e2) /
                     sizeof(uint32_t));
    } else {
        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_DISABLED;
        sb_data_e1x.common.p_func.vf_valid = FALSE;
        sb_data_p = (uint32_t *)&sb_data_e1x;
        data_size = (sizeof(struct hc_status_block_data_e1x) /
                     sizeof(uint32_t));
    }

    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);

    bxe_fill(sc,
             (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
             0, CSTORM_STATUS_BLOCK_SIZE);
    bxe_fill(sc,
             (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
             0, CSTORM_SYNC_BLOCK_SIZE);
}

/* writes the slowpath SP status block data to FW, one dword at a time */
static void
bxe_wr_sp_sb_data(struct bxe_softc *sc,
                  struct hc_sp_status_block_data *sp_sb_data)
{
    int i;

    for (i = 0;
         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
         i++) {
        REG_WR(sc,
               (BAR_CSTRORM_INTMEM +
                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
                (i * sizeof(uint32_t))),
               *((uint32_t *)sp_sb_data + i));
    }
}

/* Mark the slowpath status block disabled and zero its FW memory. */
static void
bxe_zero_sp_sb(struct bxe_softc *sc)
{
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    sp_sb_data.state = SB_DISABLED;
    sp_sb_data.p_func.vf_valid = FALSE;

    bxe_wr_sp_sb_data(sc, &sp_sb_data);

    bxe_fill(sc,
             (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
             0, CSTORM_SP_STATUS_BLOCK_SIZE);
    bxe_fill(sc,
             (BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
             0, CSTORM_SP_SYNC_BLOCK_SIZE);
}

/* Initialize one non-default SB state machine (IGU id/segment, timer off). */
static void
bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
                             int igu_sb_id,
                             int igu_seg_id)
{
    hc_sm->igu_sb_id = igu_sb_id;
    hc_sm->igu_seg_id = igu_seg_id;
    hc_sm->timer_value = 0xFF;
    hc_sm->time_to_expire = 0xFFFFFFFF;
}

/* Assign each SB index to its RX or TX state machine. */
static void
bxe_map_sb_state_machines(struct hc_index_data *index_data)
{
    /* zero out state machine indices */

    /* rx indices */
    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

    /* tx indices */
    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

    /* map indices */

    /* rx indices */
    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);

    /* tx indices */
    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
}

/* Enable and initialize a non-default status block and push it to FW. */
static void
bxe_init_sb(struct bxe_softc *sc,
            bus_addr_t busaddr,
            int vfid,
            uint8_t vf_valid,
            int fw_sb_id,
            int igu_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    struct hc_status_block_sm *hc_sm_p;
    uint32_t *sb_data_p;
    int igu_seg_id;
    int data_size;

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_seg_id = HC_SEG_ACCESS_NORM;
    } else {
        igu_seg_id = IGU_SEG_ACCESS_NORM;
    }

    /* start from a disabled, zeroed SB */
    bxe_zero_fp_sb(sc, fw_sb_id);

    if (!CHIP_IS_E1x(sc)) {
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_ENABLED;
        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e2.common.p_func.vf_id = vfid;
        sb_data_e2.common.p_func.vf_valid = vf_valid;
        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e2.common.same_igu_sb_1b = TRUE;
        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e2.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e2;
        data_size = (sizeof(struct hc_status_block_data_e2) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e2.index_data);
    } else {
        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_ENABLED;
        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e1x.common.p_func.vf_id = 0xff;
        sb_data_e1x.common.p_func.vf_valid = FALSE;
        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e1x.common.same_igu_sb_1b = TRUE;
        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e1x.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e1x;
        data_size = (sizeof(struct hc_status_block_data_e1x) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e1x.index_data);
    }

    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);

    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);

    /* write indices to HW - PCI guarantees endianity of regpairs */
    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
}

/* Queue zone id for a fastpath (per-port client spacing on E1x). */
static inline uint8_t
bxe_fp_qzone_id(struct bxe_fastpath *fp)
{
    if (CHIP_IS_E1x(fp->sc)) {
        return
            (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
    } else {
        return (fp->cl_id);
    }
}

/* USTORM offset of the RX producers for this fastpath's client. */
static inline uint32_t
bxe_rx_ustorm_prods_offset(struct bxe_softc *sc,
                           struct bxe_fastpath *fp)
{
    uint32_t offset = BAR_USTRORM_INTMEM;

    if (!CHIP_IS_E1x(sc)) {
        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
    } else {
        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
    }

    return (offset);
}

/* Initialize fastpath 'idx': ids, SB shortcuts, FW SB, queue/MAC objects. */
static void
bxe_init_eth_fp(struct bxe_softc *sc, int idx)
{
    struct bxe_fastpath *fp = &sc->fp[idx];
    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
    unsigned long q_type = 0;
    int cos;

    fp->sc = sc;
    fp->index = idx;

    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));

    fp->cl_id = (CHIP_IS_E1x(sc)) ?
                    (SC_L_ID(sc) + idx) :
                    /* want client ID same as IGU SB ID for non-E1 */
                    fp->igu_sb_id;

    fp->cl_qzone_id = bxe_fp_qzone_id(fp);

    /* setup sb indices */
    if (!CHIP_IS_E1x(sc)) {
        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
    } else {
        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
    }

    /* init shortcut */
    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);

    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];

    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID,
                FALSE, fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id,
          fp->igu_sb_id);
}

/* Publish new RX BD/CQE/SGE producer values to USTORM for this fastpath. */
static inline void
bxe_update_rx_prod(struct bxe_softc *sc,
                   struct bxe_fastpath *fp,
                   uint16_t rx_bd_prod,
                   uint16_t rx_cq_prod,
                   uint16_t rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since FW will
     * assumes BDs must have buffers.
     */
    wmb();

    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}

/* Activate the RX rings by publishing the initial producer values. */
static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
         * Warning, this will generate an interrupt (to the TSTORM)
         * so this can only be done after the chip is initialized
         */
        bxe_update_rx_prod(sc, fp,
                           fp->rx_bd_prod,
                           fp->rx_cq_prod,
                           fp->rx_sge_prod);

        if (i != 0) {
            continue;
        }

        /* E1-only memory workaround, programmed for queue 0 only */
        if (CHIP_IS_E1(sc)) {
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
                   U64_LO(fp->rcq_dma.paddr));
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
                   U64_HI(fp->rcq_dma.paddr));
        }
    }
}

/* Reset one fastpath's TX doorbell and producer/consumer indices. */
static void
bxe_init_tx_ring_one(struct bxe_fastpath *fp)
{
    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
    fp->tx_db.data.zero_fill1 = 0;
    fp->tx_db.data.prod = 0;

    fp->tx_pkt_prod = 0;
    fp->tx_pkt_cons = 0;
    fp->tx_bd_prod = 0;
    fp->tx_bd_cons = 0;
    fp->eth_q_stats.tx_pkts = 0;
}

static inline void
bxe_init_tx_rings(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_tx_ring_one(&sc->fp[i]);
    }
}

/* Initialize the default (slowpath) status block and attention groups. */
static void
bxe_init_def_sb(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    bus_addr_t mapping = sc->def_sb_dma.paddr;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int reg_offset, reg_offset_en5;
    uint64_t section;
    int index, sindex;
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = sc->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }
    /* attentions */
    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, atten_status_block));
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
    sc->attn_state = 0;

    reg_offset = (port) ?
                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
    reg_offset_en5 = (port) ?
                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        /* take care of sig[0]..sig[4] */
        for (sindex = 0; sindex < 4; sindex++) {
            sc->attn_group[index].sig[sindex] =
                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
        }

        if (!CHIP_IS_E1x(sc)) {
            /*
             * enable5 is separate from the rest of the registers,
             * and the address skip is 4 and not 16 between the
             * different groups
             */
            sc->attn_group[index].sig[4] =
                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
        } else {
            sc->attn_group[index].sig[4] = 0;
        }
    }

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_offset = (port) ?
                         HC_REG_ATTN_MSG1_ADDR_L :
                         HC_REG_ATTN_MSG0_ADDR_L;
        REG_WR(sc, reg_offset, U64_LO(section));
        REG_WR(sc, (reg_offset + 4), U64_HI(section));
    } else if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
    }

    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, sp_sb));

    bxe_zero_sp_sb(sc);

    /* PCI guarantees endianity of regpair */
    sp_sb_data.state = SB_ENABLED;
    sp_sb_data.host_sb_addr.lo = U64_LO(section);
    sp_sb_data.host_sb_addr.hi = U64_HI(section);
    sp_sb_data.igu_sb_id = igu_sp_sb_index;
    sp_sb_data.igu_seg_id = igu_seg_id;
    sp_sb_data.p_func.pf_id = func;
    sp_sb_data.p_func.vnic_id = SC_VN(sc);
    sp_sb_data.p_func.vf_id = 0xff;

    bxe_wr_sp_sb_data(sc, &sp_sb_data);

    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

/* Initialize slowpath queue (SPQ) producer state. */
static void
bxe_init_sp_ring(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
    sc->spq_prod_idx = 0;
    sc->dsb_sp_prod =
        &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
    sc->spq_prod_bd = sc->spq;
    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
}

/* Chain the event queue pages together and reset its indices. */
static void
bxe_init_eq_ring(struct bxe_softc *sc)
{
    union event_ring_elem *elem;
    int i;

    /* last element of each page points to the next page */
    for (i = 1; i <= NUM_EQ_PAGES; i++) {
        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];

        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
    }

    sc->eq_cons = 0;
    sc->eq_prod = NUM_EQ_DESC;
    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];

    atomic_store_rel_long(&sc->eq_spq_left,
                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
                               NUM_EQ_DESC) - 1));
}

/* Common (once-per-chip) internal memory initialization. */
static void
bxe_init_internal_common(struct bxe_softc *sc)
{
    int i;

    /*
     * Zero this manually as its initialization is currently missing
     * in the initTool.
     */
    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
        REG_WR(sc,
               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
               0);
    }

    if (!CHIP_IS_E1x(sc)) {
        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
                CHIP_INT_MODE_IS_BC(sc) ?
                HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}

/*
 * Dispatch internal memory init by MCP load code; cases intentionally
 * fall through from COMMON to PORT to FUNCTION.
 */
static void
bxe_init_internal(struct bxe_softc *sc, uint32_t load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}

/* Write the TSTORM common function configuration for 'abs_fid'. */
static void
storm_memset_func_cfg(struct bxe_softc *sc,
                      struct tstorm_eth_function_common_config *tcfg,
                      uint16_t abs_fid)
{
    uint32_t addr;
    size_t size;

    addr = (BAR_TSTRORM_INTMEM +
            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
    size = sizeof(struct tstorm_eth_function_common_config);

    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
}

/* Enable the function in FW and program its SPQ address/producer. */
static void
bxe_func_init(struct bxe_softc *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}

/*
 * Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates.
 *     or
 *   0 - if all the min_rates are 0.
 * In the latter case fairness algorithm should be deactivated.
 * If all min rates are not zero then those that are zeroes will be set to 1.
 */
static void
bxe_calc_vn_min(struct bxe_softc *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to 100 */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD,
              "Fariness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}

/* Extract the max BW config field; a value of 0 is treated as 100. */
static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t mf_cfg)
{
    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                        FUNC_MF_CFG_MAX_BW_SHIFT);

    if (!max_cfg) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        max_cfg = 100;
    }

    return (max_cfg);
}

/* Compute and record the max rate for virtual network 'vn'. */
static void
bxe_calc_vn_max(struct bxe_softc *sc,
                int vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}

/* Initialize congestion management (rate shaping / fairness) parameters. */
static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t read_cfg,
                  uint8_t cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}

/* MINMAX congestion management applies only in (non-slow-rev) MF mode. */
static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (CHIP_REV_IS_SLOW(sc)) {
        return (CMNG_FNS_NONE);
    }

    if (IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}

/* Push the per-port and per-VN congestion management vars to XSTORM. */
static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}

/* PF-level FW initialization: function setup, CMNG, and the event queue. */
static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration in a function level.
     */
    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id = SC_FUNC(sc);
    func_init.func_id = SC_FUNC(sc);
    func_init.spq_map = sc->spq_dma.paddr;
    func_init.spq_prod = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianness */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer = sc->eq_prod;
    eq_data.index_id = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}

/* Enable interrupts via the HC block for the current interrupt mode. */
static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ?
                  TRUE : FALSE;

    if (msix) {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0);
        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        if (single_msix) {
            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        }
    } else if (msi) {
        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        /* INTx */
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_INT_LINE_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        if (!CHIP_IS_E1(sc)) {
            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
                  val, port, addr);

            REG_WR(sc, addr, val);

            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }
    }

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, addr, val);

    /* ensure that HC_CONFIG is written before leading/trailing edge config */
    mb();

    if (!CHIP_IS_E1(sc)) {
        /* init leading/trailing edge */
        if (IS_MF(sc)) {
            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
            if (sc->port.pmf) {
                /* enable nig and gpio3 attention */
                val |= 0x1100;
            }
        } else {
            val = 0xffff;
        }

        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
    }

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}

/* Enable interrupts via the IGU block for the current interrupt mode. */
static void
bxe_igu_int_enable(struct bxe_softc *sc)
{
    uint32_t val;
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ?
                  TRUE : FALSE;

    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    if (msix) {
        val &= ~(IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_SINGLE_ISR_EN);
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN);
        if (single_msix) {
            val |= IGU_PF_CONF_SINGLE_ISR_EN;
        }
    } else if (msi) {
        val &= ~IGU_PF_CONF_INT_LINE_EN;
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    } else {
        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
        val |= (IGU_PF_CONF_INT_LINE_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    }

    /* clean previous status - need to configure igu prior to ack*/
    if ((!msix) || single_msix) {
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
        bxe_ack_int(sc);
    }

    val |= IGU_PF_CONF_FUNC_EN;

    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);

    mb();

    /* init leading/trailing edge */
    if (IS_MF(sc)) {
        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
        if (sc->port.pmf) {
            /* enable nig and gpio3 attention */
            val |= 0x1100;
        }
    } else {
        val = 0xffff;
    }

    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}

/* Enable interrupts via whichever interrupt block this chip uses. */
static void
bxe_int_enable(struct bxe_softc *sc)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        bxe_hc_int_enable(sc);
    } else {
        bxe_igu_int_enable(sc);
    }
}

/* Disable interrupts via the HC block, verifying the write took effect. */
static void
bxe_hc_int_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);

    /*
     * In E1 we must use only PCI configuration space to disable MSI/MSIX
     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
     * block
     */
    if (CHIP_IS_E1(sc)) {
        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
         * to prevent from HC sending interrupts after we exit the function
         */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, addr, val);
    if (REG_RD(sc, addr) != val) {
        BLOGE(sc, "proper val not read from HC IGU!\n");
    }
}

/* Disable interrupts via the IGU block, verifying the write took effect. */
static void
bxe_igu_int_disable(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
             IGU_PF_CONF_INT_LINE_EN |
             IGU_PF_CONF_ATTN_BIT_EN);

    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
        BLOGE(sc, "proper val not read from IGU!\n");
    }
}

/* Disable interrupts via whichever interrupt block this chip uses. */
static void
bxe_int_disable(struct bxe_softc *sc)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        bxe_hc_int_disable(sc);
    } else {
        bxe_igu_int_disable(sc);
    }
}

/* Bring up fastpaths, rings, status blocks, and interrupts for 'load_code'. */
static void
bxe_nic_init(struct bxe_softc *sc,
             int load_code)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_eth_fp(sc, i);
    }

    rmb(); /* ensure status block indices were read */

    bxe_init_rx_rings(sc);
    bxe_init_tx_rings(sc);

    if (IS_VF(sc)) {
        return;
    }

    /* initialize MOD_ABS interrupts */
    elink_init_mod_abs_int(sc, &sc->link_vars,
                           sc->devinfo.chip_id,
                           sc->devinfo.shmem_base,
                           sc->devinfo.shmem2_base,
                           SC_PORT(sc));

    bxe_init_def_sb(sc);
    bxe_update_dsb_idx(sc);
    bxe_init_sp_ring(sc);
    bxe_init_eq_ring(sc);
    bxe_init_internal(sc, load_code);
    bxe_pf_init(sc);
    bxe_stats_init(sc);

    /* flush all before enabling interrupts */
    mb();

    bxe_int_enable(sc);

    /*
     * check for SPIO5
     */
    bxe_attn_int_deasserted0(sc,
                             REG_RD(sc,
                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
                                     SC_PORT(sc)*4)) &
                             AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* Initialize the ecore objects: rx mode, mcast, CAM credit pools, RSS. */
static inline void
bxe_init_objs(struct bxe_softc *sc)
{
    /* mcast rules must be added to tx if tx switching is enabled */
    ecore_obj_type o_type =
        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
                                         ECORE_OBJ_TYPE_RX;

    /* RX_MODE controlling object */
    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);

    /* multicast configuration controlling object */
    ecore_init_mcast_obj(sc,
                         &sc->mcast_obj,
                         sc->fp[0].cl_id,
                         sc->fp[0].index,
                         SC_FUNC(sc),
                         SC_FUNC(sc),
                         BXE_SP(sc, mcast_rdata),
                         BXE_SP_MAPPING(sc, mcast_rdata),
                         ECORE_FILTER_MCAST_PENDING,
                         &sc->sp_state,
                         o_type);

    /* Setup CAM credit pools */
    ecore_init_mac_credit_pool(sc,
                               &sc->macs_pool,
                               SC_FUNC(sc),
                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                 VNICS_PER_PATH(sc));

    ecore_init_vlan_credit_pool(sc,
                                &sc->vlans_pool,
                                SC_ABS_FUNC(sc) >> 1,
                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                  VNICS_PER_PATH(sc));

    /* RSS configuration object */
    ecore_init_rss_config_obj(sc,
                              &sc->rss_conf_obj,
                              sc->fp[0].cl_id,
                              sc->fp[0].index,
                              SC_FUNC(sc),
                              SC_FUNC(sc),
                              BXE_SP(sc, rss_rdata),
                              BXE_SP_MAPPING(sc, rss_rdata),
                              ECORE_FILTER_RSS_CONF_PENDING,
                              &sc->sp_state,
                              ECORE_OBJ_TYPE_RX);
}

/*
 * Initialize the function. This must be called before sending CLIENT_SETUP
 * for the first client.
*/ static inline int bxe_func_start(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; struct ecore_func_start_params *start_params = &func_params.params.start; /* Prepare parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_START; /* Function parameters */ start_params->mf_mode = sc->devinfo.mf_info.mf_mode; start_params->sd_vlan_tag = OVLAN(sc); if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { start_params->network_cos_mode = STATIC_COS; } else { /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; } //start_params->gre_tunnel_mode = 0; //start_params->gre_tunnel_rss = 0; return (ecore_func_state_change(sc, &func_params)); } static int bxe_set_power_state(struct bxe_softc *sc, uint8_t state) { uint16_t pmcsr; /* If there is no power capability, silently succeed */ if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { BLOGW(sc, "No power capability\n"); return (0); } pmcsr = pci_read_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 2); switch (state) { case PCI_PM_D0: pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); if (pmcsr & PCIM_PSTAT_DMASK) { /* delay required during transition out of D3hot */ DELAY(20000); } break; case PCI_PM_D3hot: /* XXX if there are other clients above don't shut down the power */ /* don't shut down the power for emulation and FPGA */ if (CHIP_REV_IS_SLOW(sc)) { return (0); } pmcsr &= ~PCIM_PSTAT_DMASK; pmcsr |= PCIM_PSTAT_D3; if (sc->wol) { pmcsr |= PCIM_PSTAT_PMEENABLE; } pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), pmcsr, 4); /* * No more memory access after this point until device is brought back * to D0 state. 
*/
        break;

    default:
        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
              state, pmcsr);
        return (-1);
    }

    return (0);
}

/* return true if succeeded to acquire the lock */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc, uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    /* functions 0-5 use DRIVER_CONTROL_1..6, 6-7 use DRIVER_CONTROL_7.. */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
              "lock_status 0x%x resource_bit 0x%x\n",
          resource, func, lock_status, resource_bit);

    return (FALSE);
}

/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
static int
bxe_get_leader_lock_resource(struct bxe_softc *sc)
{
    if (SC_PATH(sc)) {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
    } else {
        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
    }
}

/* try to acquire a leader lock for current engine */
static uint8_t
bxe_trylock_leader_lock(struct bxe_softc *sc)
{
    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

/* release the leader lock for the current engine */
static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
}

/* close gates #2, #3 and #4 */
static void
bxe_set_234_gates(struct bxe_softc *sc, uint8_t close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4 */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2 */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
                          (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                          (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
                   (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
                   (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
    }

    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
          close ?
"closing" : "opening");
    wmb();
}

/* poll for pending writes bit, it should get cleared in no more than 1s */
static int
bxe_er_poll_igu_vq(struct bxe_softc *sc)
{
    uint32_t cnt = 1000;
    uint32_t pend_bits = 0;

    do {
        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);

        if (pend_bits == 0) {
            break;
        }

        DELAY(1000);
    } while (--cnt > 0);

    if (cnt == 0) {
        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
        return (-1);
    }

    return (0);
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */

/* save the MF-config 'magic' bit and set it in shared memory */
static void
bxe_clp_reset_prep(struct bxe_softc *sc, uint32_t *magic_val)
{
    /* Do some magic... */
    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
    *magic_val = val & SHARED_MF_CLP_MAGIC;
    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* restore the value of the 'magic' bit */
static void
bxe_clp_reset_done(struct bxe_softc *sc, uint32_t magic_val)
{
    /* Restore the 'magic' bit value... */
    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
    MFCFG_WR(sc, shared_mf_config.clp_mb,
             (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* prepare for MCP reset, takes care of CLP configurations */
static void
bxe_reset_mcp_prep(struct bxe_softc *sc, uint32_t *magic_val)
{
    uint32_t shmem;
    uint32_t validity_offset;

    /* set `magic' bit in order to save MF config */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_prep(sc, magic_val);
    }

    /* get shmem offset */
    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    validity_offset =
        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);

    /* Clear validity map flags */
    if (shmem > 0) {
        REG_WR(sc, shmem + validity_offset, 0);
    }
}

#define MCP_TIMEOUT      5000 /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100  /* 100 ms */

/* sleep one MCP polling interval */
static void
bxe_mcp_wait_one(struct bxe_softc *sc)
{
    /* special handling for emulation and FPGA (10 times longer) */
    if (CHIP_REV_IS_SLOW(sc)) {
        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
    } else {
        DELAY((MCP_ONE_TIMEOUT) * 1000);
    }
}

/* initialize shmem_base and waits for validity signature to appear */
static int
bxe_init_shmem(struct bxe_softc *sc)
{
    int cnt = 0;
    uint32_t val = 0;

    do {
        sc->devinfo.shmem_base     =
        sc->link_params.shmem_base =
            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);

        if (sc->devinfo.shmem_base) {
            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
            if (val & SHR_MEM_VALIDITY_MB)
                return (0);
        }

        bxe_mcp_wait_one(sc);

    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

    BLOGE(sc, "BAD MCP validity signature\n");

    return (-1);
}

/* re-init shmem after an MCP reset and restore the 'magic' bit */
static int
bxe_reset_mcp_comp(struct bxe_softc *sc, uint32_t magic_val)
{
    int rc = bxe_init_shmem(sc);

    /* Restore the `magic' bit value */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_done(sc, magic_val);
    }

    return (rc);
}

/* prepare the PXP block for the chip reset (not needed on E1) */
static void
bxe_pxp_prep(struct bxe_softc *sc)
{
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
        wmb();
    }
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc, uint8_t global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in 4 port device they might still be owned
     *            by the MCP (there is only one leader per path).
*/
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     *  - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would stuck, dmae commands
     * would not return. Therefore it's important to reset the second
     * reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
     */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
           reset_mask2 & (~not_reset_mask2));

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           reset_mask1 & (~not_reset_mask1));

    mb();
    wmb();

    /* take the blocks back out of reset, except the ones that stay */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
           reset_mask2 & (~stay_reset2));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
    wmb();
}

/*
 * Error-recovery "process kill": drain the chip, close the gates,
 * reset the chip (optionally including global blocks / MCP) and bring
 * it back. Returns 0 on success, -1 on timeout/failure.
 */
static int
bxe_process_kill(struct bxe_softc *sc, uint8_t global)
{
    int cnt = 1000;
    uint32_t val = 0;
    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
    uint32_t tags_63_32 = 0;

    /* Empty the Tetris buffer, wait for 1s */
    do {
        sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
        if (CHIP_IS_E3(sc)) {
            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
        }

        /* magic values below are the idle/empty signatures of the blocks */
        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
            ((port_is_idle_0 & 0x1) == 0x1) &&
            ((port_is_idle_1 & 0x1) == 0x1) &&
            (pgl_exp_rom2 == 0xffffffff) &&
            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
            break;
        DELAY(1000);
    } while (cnt-- > 0);

    if (cnt <= 0) {
        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
                  "are still outstanding read requests after 1s! "
                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
              sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2);
        return (-1);
    }

    mb();

    /* Close gates #2, #3 and #4 */
    bxe_set_234_gates(sc, TRUE);

    /* Poll for IGU VQs for 57712 and newer chips */
    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
        return (-1);
    }

    /* XXX indicate that "process kill" is in progress to MCP */

    /* clear "unprepared" bit */
    REG_WR(sc, MISC_REG_UNPREPARED, 0);
    mb();

    /* Make sure all is written to the chip before the reset */
    wmb();

    /*
     * Wait for 1ms to empty GLUE and PCI-E core queues,
     * PSWHST, GRC and PSWRD Tetris buffer.
     */
    DELAY(1000);

    /* Prepare to chip reset: */
    /* MCP */
    if (global) {
        bxe_reset_mcp_prep(sc, &val);
    }

    /* PXP */
    bxe_pxp_prep(sc);
    mb();

    /* reset the chip */
    bxe_process_kill_chip_reset(sc, global);
    mb();

    /* clear errors in PGB */
    if (!CHIP_IS_E1(sc))
        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);

    /* Recover after reset: */
    /* MCP */
    if (global && bxe_reset_mcp_comp(sc, val)) {
        return (-1);
    }

    /* XXX add resetting the NO_MCP mode DB here */

    /* Open the gates #2, #3 and #4 */
    bxe_set_234_gates(sc, FALSE);

    /*
     * XXX
     * IGU/AEU preparation bring back the AEU/IGU to a reset state
     * re-enable attentions
     */

    return (0);
}

/*
 * Run the recovery flow as the engine's elected leader. Returns 0 on
 * success, -1 on failure; always drops leadership and the leader lock.
 */
static int
bxe_leader_reset(struct bxe_softc *sc)
{
    int rc = 0;
    uint8_t global = bxe_reset_is_global(sc);
    uint32_t load_code;

    /*
     * If not going to reset MCP, load "fake" driver to reset HW while
     * driver is owner of the HW.
*/
    if (!global && !BXE_NOMCP(sc)) {
        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset;
        }

        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
            BLOGE(sc, "MCP unexpected response, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }

        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }
    }

    /* try to recover after the failure */
    if (bxe_process_kill(sc, global)) {
        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
        rc = -1;
        goto exit_leader_reset2;
    }

    /*
     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
     * state.
     */
    bxe_set_reset_done(sc);

    if (global) {
        bxe_clear_reset_global(sc);
    }

exit_leader_reset2:

    /* unload "fake driver" if it was loaded */
    if (!global && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

exit_leader_reset:

    sc->is_leader = 0;
    bxe_release_leader_lock(sc);

    mb();

    return (rc);
}

/*
 * prepare INIT transition, parameters configured:
 *   - HC configuration
 *   - Queue's CDU context
 */
static void
bxe_pf_q_prep_init(struct bxe_softc               *sc,
                   struct bxe_fastpath            *fp,
                   struct ecore_queue_init_params *init_params)
{
    uint8_t cos;
    int cxt_index, cxt_offset;

    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);

    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);

    /* HC rate: interrupts per second derived from the tick settings */
    init_params->rx.hc_rate =
        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
    init_params->tx.hc_rate =
        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;

    /* FW SB ID */
    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;

    /* CQ index among the SB indices */
    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;

    /* set maximum number of COSs supported by this queue */
    init_params->max_cos = sc->max_cos;

    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
          fp->index, init_params->max_cos);

    /* set the context pointers queue object */
    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
        /* XXX change index/cid here if ever support multiple tx CoS */
        /* fp->txdata[cos]->cid */
        cxt_index = fp->index / ILT_PAGE_CIDS;
        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
        init_params->cxts[cos] =
            &sc->context[cxt_index].vcxt[cxt_offset].eth;
    }
}

/* set flags that are common for the Tx-only and not normal connections */
static unsigned long
bxe_get_common_flags(struct bxe_softc    *sc,
                     struct bxe_fastpath *fp,
                     uint8_t             zero_stats)
{
    unsigned long flags = 0;

    /* PF driver will always initialize the Queue to an ACTIVE state */
    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);

    /*
     * tx only connections collect statistics (on the same index as the
     * parent connection). The statistics are zeroed when the parent
     * connection is initialized.
*/
    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
    if (zero_stats) {
        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
    }

    /*
     * tx only connections can support tx-switching, though their
     * CoS-ness doesn't survive the loopback
     */
    if (sc->flags & BXE_TX_SWITCHING) {
        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
    }

    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);

    return (flags);
}

/* build the ecore queue flags for a regular (rx+tx) connection */
static unsigned long
bxe_get_q_flags(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint8_t             leading)
{
    unsigned long flags = 0;

    if (IS_MF_SD(sc)) {
        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
    }

    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
    }

    if (leading) {
        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
    }

    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);

    /* merge with common flags */
    return (flags | bxe_get_common_flags(sc, fp, TRUE));
}

/* fill the general SETUP parameters for a queue */
static void
bxe_pf_q_prep_general(struct bxe_softc                  *sc,
                      struct bxe_fastpath               *fp,
                      struct ecore_general_setup_params *gen_init,
                      uint8_t                           cos)
{
    gen_init->stat_id = bxe_stats_id(fp);
    gen_init->spcl_id = fp->cl_id;
    gen_init->mtu     = sc->mtu;
    gen_init->cos     = cos;
}

/* fill the rx-queue SETUP parameters (pause thresholds, TPA, DMA maps) */
static void
bxe_pf_rx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct rxq_pause_params       *pause,
                 struct ecore_rxq_setup_params *rxq_init)
{
    uint8_t max_sge = 0;
    uint16_t sge_sz = 0;
    uint16_t tpa_agg_size = 0;

    pause->sge_th_lo = SGE_TH_LO(sc);
    pause->sge_th_hi = SGE_TH_HI(sc);

    /* validate SGE ring has enough to cross high threshold */
    if (sc->dropless_fc &&
        (pause->sge_th_hi + FW_PREFETCH_CNT) >
        (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
        BLOGW(sc, "sge ring threshold limit\n");
    }

    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
    tpa_agg_size = (2 * sc->mtu);
    if (tpa_agg_size < sc->max_aggregation_size) {
        tpa_agg_size = sc->max_aggregation_size;
    }

    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
               (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;

    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);

    /* pause - not for e1 */
    if (!CHIP_IS_E1(sc)) {
        pause->bd_th_lo = BD_TH_LO(sc);
        pause->bd_th_hi = BD_TH_HI(sc);

        pause->rcq_th_lo = RCQ_TH_LO(sc);
        pause->rcq_th_hi = RCQ_TH_HI(sc);

        /* validate rings have enough entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup */
    rxq_init->dscr_map   = fp->rx_dma.paddr;
    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map    = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be a maximum number of data bytes that may be
     * placed on the BD (not including paddings).
     */
    rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz      = tpa_agg_size;
    rxq_init->sge_buf_sz      = sge_sz;
    rxq_init->max_sges_pkt    = max_sge;
    rxq_init->rss_engine_id   = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number or simultaneous TPA aggregation for this Queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}

/* fill the tx-queue SETUP parameters */
static void
bxe_pf_tx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct ecore_txq_setup_params *txq_init,
                 uint8_t                       cos)
{
    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     * fp->txdata[cos]->tx_dma.paddr;
     */
    txq_init->dscr_map = fp->tx_dma.paddr;
    txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
    txq_init->fw_sb_id = fp->fw_sb_id;

    /*
     * set the TSS leading client id for TX classification to the
     * leading RSS client id
     */
    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
}

/*
 * This function performs 2 steps in a queue state machine:
 *   1) RESET->INIT
 *   2) INIT->SETUP
 */
static int
bxe_setup_queue(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint8_t             leading)
{
    struct ecore_queue_state_params q_params = { NULL };
    struct ecore_queue_setup_params *setup_params = &q_params.params.setup;
    int rc;

    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);

    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);

    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    /* we want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* prepare the INIT parameters */
    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_INIT;

    /* Change the state to INIT */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
        return (rc);
    }

    BLOGD(sc, DBG_LOAD, "init complete\n");

    /*
now move the Queue to the SETUP state */ memset(setup_params, 0, sizeof(*setup_params)); /* set Queue flags */ setup_params->flags = bxe_get_q_flags(sc, fp, leading); /* set general SETUP parameters */ bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, FIRST_TX_COS_INDEX); bxe_pf_rx_q_prep(sc, fp, &setup_params->pause_params, &setup_params->rxq_params); bxe_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); /* Set the command */ q_params.cmd = ECORE_Q_CMD_SETUP; /* change the state to SETUP */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); return (rc); } return (rc); } static int bxe_setup_leading(struct bxe_softc *sc) { return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); } static int bxe_config_rss_pf(struct bxe_softc *sc, struct ecore_rss_config_obj *rss_obj, uint8_t config_hash) { struct ecore_config_rss_params params = { NULL }; int i; /* * Although RSS is meaningless when there is a single HW queue we * still need it enabled in order to have HW Rx hash generated. 
*/ params.rss_obj = rss_obj; bxe_set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); bxe_set_bit(ECORE_RSS_MODE_REGULAR, ¶ms.rss_flags); /* RSS configuration */ bxe_set_bit(ECORE_RSS_IPV4, ¶ms.rss_flags); bxe_set_bit(ECORE_RSS_IPV4_TCP, ¶ms.rss_flags); bxe_set_bit(ECORE_RSS_IPV6, ¶ms.rss_flags); bxe_set_bit(ECORE_RSS_IPV6_TCP, ¶ms.rss_flags); if (rss_obj->udp_rss_v4) { bxe_set_bit(ECORE_RSS_IPV4_UDP, ¶ms.rss_flags); } if (rss_obj->udp_rss_v6) { bxe_set_bit(ECORE_RSS_IPV6_UDP, ¶ms.rss_flags); } /* Hash bits */ params.rss_result_mask = MULTI_MASK; memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); if (config_hash) { /* RSS keys */ for (i = 0; i < sizeof(params.rss_key) / 4; i++) { params.rss_key[i] = arc4random(); } bxe_set_bit(ECORE_RSS_SET_SRCH, ¶ms.rss_flags); } return (ecore_config_rss(sc, ¶ms)); } static int bxe_config_rss_eth(struct bxe_softc *sc, uint8_t config_hash) { return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); } static int bxe_init_rss_pf(struct bxe_softc *sc) { uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); int i; /* * Prepare the initial contents of the indirection table if * RSS is enabled */ for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { sc->rss_conf_obj.ind_table[i] = (sc->fp->cl_id + (i % num_eth_queues)); } if (sc->udp_rss) { sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; } /* * For 57710 and 57711 SEARCHER configuration (rss_keys) is * per-port, so if explicit configuration is needed, do it only * for a PMF. * * For 57712 and newer it's a per-function configuration. 
*/
    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
}

/*
 * Add or delete a single MAC classification rule via the ecore
 * vlan/mac object. Adding a MAC that already exists is not treated
 * as an error.
 */
static int
bxe_set_mac_one(struct bxe_softc          *sc,
                uint8_t                   *mac,
                struct ecore_vlan_mac_obj *obj,
                uint8_t                   set,
                int                       mac_type,
                unsigned long             *ramrod_flags)
{
    struct ecore_vlan_mac_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* fill in general parameters */
    ramrod_param.vlan_mac_obj = obj;
    ramrod_param.ramrod_flags = *ramrod_flags;

    /* fill a user request section if needed */
    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

        /* Set the command: ADD or DEL */
        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
                                            ECORE_VLAN_MAC_DEL;
    }

    rc = ecore_config_vlan_mac(sc, &ramrod_param);

    if (rc == ECORE_EXISTS) {
        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
        /* do not treat adding same MAC as error */
        rc = 0;
    } else if (rc < 0) {
        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
    }

    return (rc);
}

/* add/remove the primary Ethernet MAC on the leading RSS client */
static int
bxe_set_eth_mac(struct bxe_softc *sc, uint8_t set)
{
    unsigned long ramrod_flags = 0;

    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");

    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

    /* Eth MAC is set on RSS leading client (fp[0]) */
    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
                            &sc->sp_objs->mac_obj,
                            set, ECORE_ETH_MAC, &ramrod_flags));
}

/*
 * Determine which PHY index is currently active: the internal PHY
 * for single-PHY boards, otherwise EXT_PHY1/EXT_PHY2 based on link
 * state or the configured PHY selection.
 */
static int
bxe_get_cur_phy_idx(struct bxe_softc *sc)
{
    uint32_t sel_phy_idx = 0;

    if (sc->link_params.num_phys <= 1) {
        return (ELINK_INT_PHY);
    }

    if (sc->link_vars.link_up) {
        sel_phy_idx = ELINK_EXT_PHY1;
        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
             ELINK_SUPPORTED_FIBRE))
            sel_phy_idx = ELINK_EXT_PHY2;
    } else {
        switch (elink_phy_selection(&sc->link_params)) {
        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
            sel_phy_idx = ELINK_EXT_PHY1;
            break;
        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
            sel_phy_idx = ELINK_EXT_PHY2;
            break;
        }
    }

    return (sel_phy_idx);
}

/* map the active PHY index to its link-configuration array index */
static int
bxe_get_link_cfg_idx(struct bxe_softc *sc)
{
    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);

    /*
     * The selected activated PHY is always after swapping (in case PHY
     * swapping is enabled).
So when swapping is enabled, we need to reverse * the configuration */ if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { if (sel_phy_idx == ELINK_EXT_PHY1) sel_phy_idx = ELINK_EXT_PHY2; else if (sel_phy_idx == ELINK_EXT_PHY2) sel_phy_idx = ELINK_EXT_PHY1; } return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); } static void bxe_set_requested_fc(struct bxe_softc *sc) { /* * Initialize link parameters structure variables * It is recommended to turn off RX FC for jumbo frames * for better performance */ if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; } else { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; } } static void bxe_calc_fc_adv(struct bxe_softc *sc) { uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); switch (sc->link_vars.ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; break; default: break; } } static uint16_t bxe_get_mf_speed(struct bxe_softc *sc) { uint16_t line_speed = sc->link_vars.line_speed; if (IS_MF(sc)) { uint16_t maxCfg = bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); /* calculate the current MAX line speed limit for the MF devices */ if (IS_MF_SI(sc)) { line_speed = (line_speed * maxCfg) / 100; } else { /* SD mode */ uint16_t vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) { line_speed = vn_max_rate; } } } return (line_speed); } static void bxe_fill_report_data(struct bxe_softc *sc, struct bxe_link_report_data *data) { uint16_t line_speed = bxe_get_mf_speed(sc); memset(data, 0, sizeof(*data)); /* fill the report data with the effective line speed */ data->line_speed = line_speed; /* Link is down */ if (!sc->link_vars.link_up || 
(sc->flags & BXE_MF_FUNC_DIS)) { bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); } /* Full DUPLEX */ if (sc->link_vars.duplex == DUPLEX_FULL) { bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); } /* Rx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); } /* Tx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); } } /* report link status to OS, should be called under phy_lock */ static void bxe_link_report_locked(struct bxe_softc *sc) { struct bxe_link_report_data cur_data; /* reread mf_cfg */ if (IS_PF(sc) && !CHIP_IS_E1(sc)) { bxe_read_mf_cfg(sc); } /* Read the current link report info */ bxe_fill_report_data(sc, &cur_data); /* Don't report link down or exactly the same link status twice */ if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &sc->last_reported_link.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags))) { return; } ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n", cur_data.link_report_flags, sc->last_reported_link.link_report_flags); sc->link_cnt++; ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt); /* report new link params and remember the state for the next time */ memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags)) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); } else { const char *duplex; const char *flow; if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, &cur_data.link_report_flags)) { duplex = "full"; ELINK_DEBUG_P0(sc, "link set to full duplex\n"); } else { duplex = "half"; ELINK_DEBUG_P0(sc, "link set to half duplex\n"); } /* * Handle the FC at the end so that only these flags would be * 
possibly set. This way we may easily check if there is no FC
         * enabled.
         */
        if (cur_data.link_report_flags) {
            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                             &cur_data.link_report_flags) &&
                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                             &cur_data.link_report_flags)) {
                flow = "ON - receive & transmit";
            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                    &cur_data.link_report_flags) &&
                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                     &cur_data.link_report_flags)) {
                flow = "ON - receive";
            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                     &cur_data.link_report_flags) &&
                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                    &cur_data.link_report_flags)) {
                flow = "ON - transmit";
            } else {
                flow = "none"; /* possible? */
            }
        } else {
            flow = "none";
        }

        if_link_state_change(sc->ifp, LINK_STATE_UP);
        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
              cur_data.line_speed, duplex, flow);
    }
}

/* Unlocked wrapper: take the PHY lock around bxe_link_report_locked(). */
static void
bxe_link_report(struct bxe_softc *sc)
{
    bxe_acquire_phy_lock(sc);
    bxe_link_report_locked(sc);
    bxe_release_phy_lock(sc);
}

/* Refresh link state from the elink layer (real HW) or fake a 1G/10G
 * link-up for slow (emulation/FPGA) platforms, then push stats and the
 * link report to the stack. */
static void
bxe_link_status_update(struct bxe_softc *sc)
{
    if (sc->state != BXE_STATE_OPEN) {
        return;
    }

    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
        elink_link_status_update(&sc->link_params, &sc->link_vars);
    } else {
        /* Slow path (emulation/FPGA) or VF: synthesize link parameters */
        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
                                  ELINK_SUPPORTED_10baseT_Full |
                                  ELINK_SUPPORTED_100baseT_Half |
                                  ELINK_SUPPORTED_100baseT_Full |
                                  ELINK_SUPPORTED_1000baseT_Full |
                                  ELINK_SUPPORTED_2500baseX_Full |
                                  ELINK_SUPPORTED_10000baseT_Full |
                                  ELINK_SUPPORTED_TP |
                                  ELINK_SUPPORTED_FIBRE |
                                  ELINK_SUPPORTED_Autoneg |
                                  ELINK_SUPPORTED_Pause |
                                  ELINK_SUPPORTED_Asym_Pause);
        sc->port.advertising[0] = sc->port.supported[0];

        sc->link_params.sc                = sc;
        sc->link_params.port              = SC_PORT(sc);
        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
        sc->link_params.req_line_speed[0] = SPEED_10000;
        sc->link_params.speed_cap_mask[0] = 0x7f0000;
        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;

        if (CHIP_REV_IS_FPGA(sc)) {
            sc->link_vars.mac_type
= ELINK_MAC_TYPE_EMAC;
            sc->link_vars.line_speed  = ELINK_SPEED_1000;
            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
        } else {
            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
            sc->link_vars.line_speed  = ELINK_SPEED_10000;
            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
        }

        sc->link_vars.link_up = 1;

        sc->link_vars.duplex    = DUPLEX_FULL;
        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;

        if (IS_PF(sc)) {
            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
            bxe_link_report(sc);
        }
    }

    if (IS_PF(sc)) {
        if (sc->link_vars.link_up) {
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        } else {
            bxe_stats_handle(sc, STATS_EVENT_STOP);
        }
        bxe_link_report(sc);
    } else {
        bxe_link_report(sc);
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
    }
}

/* Bring up the PHY for the given load mode (normal/diag/loopback) via
 * elink_phy_init(), then recompute FC advertisement and report link.
 * Returns the elink_phy_init() status. */
static int
bxe_initial_phy_init(struct bxe_softc *sc,
                     int              load_mode)
{
    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
    /* saved so the loopback speed override below can be undone on exit */
    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
    struct elink_params *lp = &sc->link_params;

    bxe_set_requested_fc(sc);

    if (CHIP_REV_IS_SLOW(sc)) {
        /* emulation: pick which MACs to disable based on the bond id */
        uint32_t bond = CHIP_BOND_ID(sc);
        uint32_t feat = 0;

        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
        } else if (bond & 0x4) {
            if (CHIP_IS_E3(sc)) {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
            } else {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
            }
        } else if (bond & 0x8) {
            if (CHIP_IS_E3(sc)) {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
            } else {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
            }
        }

        /* disable EMAC for E3 and above */
        if (bond & 0x2) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
        }

        sc->link_params.feature_config_flags |= feat;
    }

    bxe_acquire_phy_lock(sc);

    if (load_mode == LOAD_DIAG) {
        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
        /* Prefer doing PHY loopback at 10G speed, if possible */
        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
            if (lp->speed_cap_mask[cfg_idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
            } else {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
            }
        }
    }

    if (load_mode == LOAD_LOOPBACK_EXT) {
        lp->loopback_mode = ELINK_LOOPBACK_EXT;
    }

    rc = elink_phy_init(&sc->link_params, &sc->link_vars);

    bxe_release_phy_lock(sc);

    bxe_calc_fc_adv(sc);

    if (sc->link_vars.link_up) {
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        bxe_link_report(sc);
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        bxe_periodic_start(sc);
    }

    /* restore the requested speed that may have been overridden for DIAG */
    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
    return (rc);
}

/* if_foreach_llmaddr() callback: record the cnt-th multicast MAC pointer
 * into the caller-supplied element array. */
static u_int
bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct ecore_mcast_list_elem *mc_mac = arg;

    mc_mac += cnt;
    mc_mac->mac = (uint8_t *)LLADDR(sdl);

    return (1);
}

/* Build the ecore multicast list from the interface's current multicast
 * addresses. Returns 0 on success (possibly with an empty list), -1 on
 * allocation failure. The list elements are one heap array, freed later
 * by bxe_free_mcast_macs_list(). */
static int
bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
                         struct ecore_mcast_ramrod_params *p)
{
    if_t ifp = sc->ifp;
    int mc_count;
    struct ecore_mcast_list_elem *mc_mac;

    ECORE_LIST_INIT(&p->mcast_list);
    p->mcast_list_len = 0;

    /* XXXGL: multicast count may change later */
    mc_count = if_llmaddr_count(ifp);

    if (!mc_count) {
        return (0);
    }

    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
                    (M_NOWAIT | M_ZERO));
    if (!mc_mac) {
        BLOGE(sc, "Failed to allocate temp mcast list\n");
        return (-1);
    }
    /* NOTE(review): this bzero is redundant — the buffer was already
     * zeroed by M_ZERO above. Harmless, but could be dropped. */
    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
    if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);

    for (int i = 0; i < mc_count; i ++) {
        ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
        BLOGD(sc, DBG_LOAD,
              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
              mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
              mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
              mc_count);
    }

    p->mcast_list_len = mc_count;

    return (0);
}

/* Release the multicast list built by bxe_init_mcast_macs_list(). */
static void
bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
{
    struct ecore_mcast_list_elem *mc_mac =
        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
                               struct ecore_mcast_list_elem,
                               link);

    if (mc_mac) {
        /* only a single free as all mc_macs are in the same heap array */
        free(mc_mac, M_DEVBUF);
    }
}

static int
bxe_set_mc_list(struct bxe_softc *sc)
{
    struct ecore_mcast_ramrod_params rparam = { NULL };
    int rc = 0;

    rparam.mcast_obj = &sc->mcast_obj;

    BXE_MCAST_LOCK(sc);

    /* first, clear all configured multicast MACs */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
        /* Manual backport parts of FreeBSD upstream r284470. */
        BXE_MCAST_UNLOCK(sc);
        return (rc);
    }

    /* configure a new MACs list */
    rc = bxe_init_mcast_macs_list(sc, &rparam);
    if (rc) {
        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
        BXE_MCAST_UNLOCK(sc);
        return (rc);
    }

    /* Now add the new MACs */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
    if (rc < 0) {
        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
    }

    bxe_free_mcast_macs_list(&rparam);

    BXE_MCAST_UNLOCK(sc);

    return (rc);
}

/* Context handed to the bxe_set_addr() iterator callback. */
struct bxe_set_addr_ctx {
    struct bxe_softc *sc;
    unsigned long ramrod_flags;  /* accumulated RAMROD_* flags */
    int rc;                      /* first error seen, sticky */
};

/* if_foreach_lladdr() callback: schedule an ADD of one unicast MAC.
 * A duplicate (EEXIST) is not treated as an error; any other failure is
 * latched into ctx->rc and stops further scheduling. */
static u_int
bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct bxe_set_addr_ctx *ctx = arg;
    struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
    int rc;

    if (ctx->rc < 0)
        return (0);

    rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
                         ECORE_UC_LIST_MAC, &ctx->ramrod_flags);

    /* do not treat adding same MAC as an error */
    if (rc == -EEXIST)
        BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
    else if (rc < 0) {
        BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
        ctx->rc = rc;
    }

    return (1);
}

/* Reprogram the device unicast MAC list from the interface's link-level
 * addresses: delete everything, schedule ADDs, then execute. */
static int
bxe_set_uc_list(struct bxe_softc *sc)
{
    if_t ifp = sc->ifp;
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
    int rc;

    /* first schedule a cleanup up of old configuration */
    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
        return (rc);
    }

    if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
    if (ctx.rc < 0)
        return (ctx.rc);

    /* Execute the
pending commands */
    bit_set(&ctx.ramrod_flags, RAMROD_CONT);
    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
                            ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
}

/* Compute and apply the RX filtering mode (normal / all-multi / promisc)
 * from the interface flags and the uc/mc list programming results. */
static void
bxe_set_rx_mode(struct bxe_softc *sc)
{
    if_t ifp = sc->ifp;
    uint32_t rx_mode = BXE_RX_MODE_NORMAL;

    if (sc->state != BXE_STATE_OPEN) {
        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
        return;
    }

    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));

    if (if_getflags(ifp) & IFF_PROMISC) {
        rx_mode = BXE_RX_MODE_PROMISC;
    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
                CHIP_IS_E1(sc))) {
        rx_mode = BXE_RX_MODE_ALLMULTI;
    } else {
        if (IS_PF(sc)) {
            /* some multicasts */
            if (bxe_set_mc_list(sc) < 0) {
                /* fall back to all-multi if exact filtering failed */
                rx_mode = BXE_RX_MODE_ALLMULTI;
            }
            if (bxe_set_uc_list(sc) < 0) {
                /* fall back to promisc if unicast programming failed */
                rx_mode = BXE_RX_MODE_PROMISC;
            }
        }
    }

    sc->rx_mode = rx_mode;

    /* schedule the rx_mode command */
    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
        return;
    }

    if (IS_PF(sc)) {
        bxe_set_storm_rx_mode(sc);
    }
}

/* update flags in shmem */
static void
bxe_update_drv_flags(struct bxe_softc *sc,
                     uint32_t         flags,
                     uint32_t         set)
{
    uint32_t drv_flags;

    if (SHMEM2_HAS(sc, drv_flags)) {
        /* read-modify-write under the shared DRV_FLAGS HW lock */
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);

        drv_flags = SHMEM2_RD(sc, drv_flags);

        if (set) {
            SET_FLAGS(drv_flags, flags);
        } else {
            RESET_FLAGS(drv_flags, flags);
        }

        SHMEM2_WR(sc, drv_flags, drv_flags);

        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
    }
}

/* periodic timer callout routine, only runs when the interface is up */
static void
bxe_periodic_callout_func(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;
    int i;

    if (!BXE_CORE_TRYLOCK(sc)) {
        /* just bail and try again next time */

        if ((sc->state == BXE_STATE_OPEN) &&
            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
            /* schedule the
next periodic callout */
            callout_reset(&sc->periodic_callout, hz,
                          bxe_periodic_callout_func, sc);
        }

        return;
    }

    if ((sc->state != BXE_STATE_OPEN) ||
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
        BXE_CORE_UNLOCK(sc);
        return;
    }

    /* Check for TX timeouts on any fastpath. */
    FOR_EACH_QUEUE(sc, i) {
        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
            /* Ruh-Roh, chip was reset! */
            break;
        }
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        /*
         * This barrier is needed to ensure the ordering between the writing
         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
         * the reading here.
         */
        mb();
        if (sc->port.pmf) {
            bxe_acquire_phy_lock(sc);
            elink_period_func(&sc->link_params, &sc->link_vars);
            bxe_release_phy_lock(sc);
        }
    }

    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
        /* heartbeat exchange with management firmware (MCP) */
        int mb_idx = SC_FW_MB_IDX(sc);
        uint32_t drv_pulse;
        uint32_t mcp_pulse;

        ++sc->fw_drv_pulse_wr_seq;
        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;

        drv_pulse = sc->fw_drv_pulse_wr_seq;
        bxe_drv_pulse(sc);

        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);

        /*
         * The delta between driver pulse and mcp response should
         * be 1 (before mcp response) or 0 (after mcp response).
         */
        if ((drv_pulse != mcp_pulse) &&
            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
            /* someone lost a heartbeat...
*/
            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                  drv_pulse, mcp_pulse);
        }
    }

    /* state is BXE_STATE_OPEN */
    bxe_stats_handle(sc, STATS_EVENT_UPDATE);

    BXE_CORE_UNLOCK(sc);

    if ((sc->state == BXE_STATE_OPEN) &&
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
        /* schedule the next periodic callout */
        callout_reset(&sc->periodic_callout, hz,
                      bxe_periodic_callout_func, sc);
    }
}

/* Arm the periodic callout and mark it runnable. */
static void
bxe_periodic_start(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
    callout_reset(&sc->periodic_callout, hz,
                  bxe_periodic_callout_func, sc);
}

/* Mark the periodic callout stopped and wait for any in-flight run. */
static void
bxe_periodic_stop(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
    callout_drain(&sc->periodic_callout);
}

/* Parity/error recovery state machine. Drives recovery_state through
 * INIT -> WAIT -> (NIC_LOADING -> DONE | FAILED), using a HW leader lock
 * so only one function per engine performs the chip reset. Re-arms itself
 * via sp_err_timeout_task while waiting for other functions. */
void
bxe_parity_recover(struct bxe_softc *sc)
{
    uint8_t global = FALSE;
    uint32_t error_recovered, error_unrecovered;
    bool is_parity;

    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
        (sc->state == BXE_STATE_ERROR)) {
        BLOGE(sc, "RECOVERY failed, "
            "stack notified driver is NOT running! "
            "Please reboot/power cycle the system.\n");
        return;
    }

    while (1) {
        BLOGD(sc, DBG_SP,
           "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
            __func__, sc, sc->state, sc->recovery_state, sc->error_status);

        switch(sc->recovery_state) {

        case BXE_RECOVERY_INIT:
            /* NOTE(review): is_parity is assigned here but never read in
             * this function (only the &global out-parameter is used). */
            is_parity = bxe_chk_parity_attn(sc, &global, FALSE);

            /* unrecoverable cases: 4-port mode, MCP assert, global error */
            if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
                (sc->error_status & BXE_ERR_MCP_ASSERT) ||
                (sc->error_status & BXE_ERR_GLOBAL)) {

                BXE_CORE_LOCK(sc);
                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                    bxe_periodic_stop(sc);
                }
                bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
                sc->state = BXE_STATE_ERROR;
                sc->recovery_state = BXE_RECOVERY_FAILED;
                BLOGE(sc, " No Recovery tried for error 0x%x"
                      " stack notified driver is NOT running!"
" Please reboot/power cycle the system.\n",
                      sc->error_status);
                BXE_CORE_UNLOCK(sc);
                return;
            }

            /* Try to get a LEADER_LOCK HW lock */
            if (bxe_trylock_leader_lock(sc)) {

                bxe_set_reset_in_progress(sc);
                /*
                 * Check if there is a global attention and if
                 * there was a global attention, set the global
                 * reset bit.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }
                sc->is_leader = 1;
            }

            /* If interface has been removed - break */
            /* NOTE(review): despite the comment above, this stops the
             * periodic callout when the interface IS running; the comment
             * and the condition appear mismatched — confirm intent. */
            if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                bxe_periodic_stop(sc);
            }

            BXE_CORE_LOCK(sc);
            bxe_nic_unload(sc,UNLOAD_RECOVERY, false);
            sc->recovery_state = BXE_RECOVERY_WAIT;
            BXE_CORE_UNLOCK(sc);

            /*
             * Ensure "is_leader", MCP command sequence and
             * "recovery_state" update values are seen on other
             * CPUs.
             */
            mb();
            break;
        case BXE_RECOVERY_WAIT:

            if (sc->is_leader) {
                int other_engine = SC_PATH(sc) ? 0 : 1;
                bool other_load_status =
                    bxe_get_load_status(sc, other_engine);
                bool load_status =
                    bxe_get_load_status(sc, SC_PATH(sc));
                global = bxe_reset_is_global(sc);

                /*
                 * In case of a parity in a global block, let
                 * the first leader that performs a
                 * leader_reset() reset the global blocks in
                 * order to clear global attentions. Otherwise
                 * the gates will remain closed for that
                 * engine.
                 */
                if (load_status ||
                    (global && other_load_status)) {
                    /*
                     * Wait until all other functions get
                     * down.
                     */
                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                    return;
                } else {
                    /*
                     * If all other functions got down
                     * try to bring the chip back to
                     * normal. In any case it's an exit
                     * point for a leader.
                     */
                    if (bxe_leader_reset(sc)) {
                        BLOGE(sc, "RECOVERY failed, "
                            "stack notified driver is NOT running!\n");
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        mb();
                        return;
                    }

                    /*
                     * If we are here, means that the
                     * leader has succeeded and doesn't
                     * want to be a leader any more. Try
                     * to continue as a none-leader.
*/
                    break;
                }
            } else { /* non-leader */
                if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
                    /*
                     * Try to get a LEADER_LOCK HW lock as
                     * long as a former leader may have
                     * been unloaded by the user or
                     * released a leadership by another
                     * reason.
                     */
                    if (bxe_trylock_leader_lock(sc)) {
                        /*
                         * I'm a leader now! Restart a
                         * switch case.
                         */
                        sc->is_leader = 1;
                        break;
                    }

                    taskqueue_enqueue_timeout(taskqueue_thread,
                        &sc->sp_err_timeout_task, hz/10);
                    return;

                } else {
                    /*
                     * If there was a global attention, wait
                     * for it to be cleared.
                     */
                    if (bxe_reset_is_global(sc)) {
                        taskqueue_enqueue_timeout(taskqueue_thread,
                            &sc->sp_err_timeout_task, hz/10);
                        return;
                    }

                    error_recovered =
                        sc->eth_stats.recoverable_error;
                    error_unrecovered =
                        sc->eth_stats.unrecoverable_error;
                    BXE_CORE_LOCK(sc);
                    sc->recovery_state =
                        BXE_RECOVERY_NIC_LOADING;
                    if (bxe_nic_load(sc, LOAD_NORMAL)) {
                        error_unrecovered++;
                        sc->recovery_state = BXE_RECOVERY_FAILED;
                        sc->state = BXE_STATE_ERROR;
                        BLOGE(sc, "Recovery is NOT successfull, "
                            " state=0x%x recovery_state=0x%x error=%x\n",
                            sc->state, sc->recovery_state, sc->error_status);
                        sc->error_status = 0;
                    } else {
                        sc->recovery_state = BXE_RECOVERY_DONE;
                        error_recovered++;
                        BLOGI(sc, "Recovery is successfull from errors %x,"
                            " state=0x%x"
                            " recovery_state=0x%x \n", sc->error_status,
                            sc->state, sc->recovery_state);
                        mb();
                    }
                    sc->error_status = 0;
                    BXE_CORE_UNLOCK(sc);
                    sc->eth_stats.recoverable_error =
                        error_recovered;
                    sc->eth_stats.unrecoverable_error =
                        error_unrecovered;

                    return;
                }
            }
        default:
            return;
        }
    }
}

/* Kick off parity recovery for any pending error: disable interrupts,
 * report link down, and enter the recovery state machine. No-op while a
 * recovery is already in its WAIT phase. */
void
bxe_handle_error(struct bxe_softc * sc)
{

    if(sc->recovery_state == BXE_RECOVERY_WAIT) {
        return;
    }
    if(sc->error_status) {
        if (sc->state == BXE_STATE_OPEN) {
            bxe_int_disable(sc);
        }
        if (sc->link_vars.link_up) {
            if_link_state_change(sc->ifp, LINK_STATE_DOWN);
        }
        sc->recovery_state = BXE_RECOVERY_INIT;
        BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
            sc->unit, sc->error_status, sc->recovery_state);
        bxe_parity_recover(sc);
    }
}

/* Deferred-task entry point for error handling / recovery continuation,
 * scheduled via sp_err_timeout_task. */
static void
bxe_sp_err_timeout_task(void *arg, int
pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)arg;

    BLOGD(sc, DBG_SP,
        "%s state = 0x%x rec state=0x%x error_status=%x\n",
        __func__, sc->state, sc->recovery_state, sc->error_status);

    if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
       (sc->state == BXE_STATE_ERROR)) {
        return;
    }
    /* if can be taken */
    if ((sc->error_status) && (sc->trigger_grcdump)) {
        bxe_grc_dump(sc);
    }
    if (sc->recovery_state != BXE_RECOVERY_DONE) {
        bxe_handle_error(sc);
        bxe_parity_recover(sc);
    } else if (sc->error_status) {
        bxe_handle_error(sc);
    }

    return;
}

/* start the controller */
static __noinline int
bxe_nic_load(struct bxe_softc *sc,
             int              load_mode)
{
    uint32_t val;
    int load_code = 0;
    int i, rc = 0;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");

    sc->state = BXE_STATE_OPENING_WAITING_LOAD;

    if (IS_PF(sc)) {
        /* must be called before memory allocation and HW init */
        bxe_ilt_set_info(sc);
    }

    sc->last_reported_link_state = LINK_STATE_UNKNOWN;

    bxe_set_fp_rx_buf_size(sc);

    /* memory allocations; failures unwind through the error0 label */
    if (bxe_alloc_fp_buffers(sc) != 0) {
        BLOGE(sc, "Failed to allocate fastpath memory\n");
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_fw_stats_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (IS_PF(sc)) {
        /* set pf load just before approaching the MCP */
        bxe_set_pf_load(sc);

        /* if MCP exists send load request and analyze response */
        if (!BXE_NOMCP(sc)) {
            /* attempt to load pf */
            if (bxe_nic_load_request(sc, &load_code) != 0) {
                sc->state = BXE_STATE_CLOSED;
                rc = ENXIO;
                goto bxe_nic_load_error1;
            }

            /* what did the MCP say?
*/
            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
                sc->state = BXE_STATE_CLOSED;
                rc = ENXIO;
                goto bxe_nic_load_error2;
            }
        } else {
            BLOGI(sc, "Device has no MCP!\n");
            load_code = bxe_nic_load_no_mcp(sc);
        }

        /* mark PMF if applicable */
        bxe_nic_load_pmf(sc, load_code);

        /* Init Function state controlling object */
        bxe_init_func_obj(sc);

        /* Initialize HW */
        if (bxe_init_hw(sc, load_code) != 0) {
            BLOGE(sc, "HW init failed\n");
            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            sc->state = BXE_STATE_CLOSED;
            rc = ENXIO;
            goto bxe_nic_load_error2;
        }
    }

    /* set ALWAYS_ALIVE bit in shmem */
    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
    bxe_drv_pulse(sc);
    sc->flags |= BXE_NO_PULSE;

    /* attach interrupts */
    if (bxe_interrupt_attach(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENXIO;
        goto bxe_nic_load_error2;
    }

    bxe_nic_init(sc, load_code);

    /* Init per-function objects */
    if (IS_PF(sc)) {
        bxe_init_objs(sc);
        // XXX bxe_iov_nic_init(sc);

        /* set AFEX default VLAN tag to an invalid value */
        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
        // XXX bxe_nic_load_afex_dcc(sc, load_code);

        sc->state = BXE_STATE_OPENING_WAITING_PORT;
        rc = bxe_func_start(sc);
        if (rc) {
            BLOGE(sc, "Function start failed! rc = %d\n", rc);
            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        /* send LOAD_DONE command to MCP */
        if (!BXE_NOMCP(sc)) {
            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            if (!load_code) {
                BLOGE(sc, "MCP response failure, aborting\n");
                sc->state = BXE_STATE_ERROR;
                rc = ENXIO;
                goto bxe_nic_load_error3;
            }
        }

        rc = bxe_setup_leading(sc);
        if (rc) {
            BLOGE(sc, "Setup leading failed! 
rc = %d\n", rc);
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        /* set up the remaining (non-leading) ethernet queues */
        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
            if (rc) {
                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
                sc->state = BXE_STATE_ERROR;
                goto bxe_nic_load_error3;
            }
        }

        rc = bxe_init_rss_pf(sc);
        if (rc) {
            BLOGE(sc, "PF RSS init failed\n");
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }
    /* XXX VF */

    /* now when Clients are configured we are ready to work */
    sc->state = BXE_STATE_OPEN;

    /* Configure a ucast MAC */
    if (IS_PF(sc)) {
        rc = bxe_set_eth_mac(sc, TRUE);
    }
    if (rc) {
        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
        sc->state = BXE_STATE_ERROR;
        goto bxe_nic_load_error3;
    }

    if (sc->port.pmf) {
        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
        if (rc) {
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }

    sc->link_params.feature_config_flags &=
        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;

    /* start fast path */

    /* Initialize Rx filter */
    bxe_set_rx_mode(sc);

    /* start the Tx */
    switch (/* XXX load_mode */LOAD_OPEN) {
    case LOAD_NORMAL:
    case LOAD_OPEN:
        break;

    case LOAD_DIAG:
    case LOAD_LOOPBACK_EXT:
        sc->state = BXE_STATE_DIAG;
        break;

    default:
        break;
    }

    if (sc->port.pmf) {
        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
    } else {
        bxe_link_status_update(sc);
    }

    /* start the periodic timer callout */
    bxe_periodic_start(sc);

    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        /* mark driver is loaded in shmem2 */
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  (val |
                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
    }

    /* wait for all pending SP commands to complete */
    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
        bxe_periodic_stop(sc);
        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
        return (ENXIO);
    }

    /* Tell the stack the driver is running!
*/
    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);

    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");

    return (0);

    /* goto-cleanup ladder: each label undoes one more stage of the load */
bxe_nic_load_error3:

    if (IS_PF(sc)) {
        bxe_int_disable_sync(sc, 1);

        /* clean out queued objects */
        bxe_squeeze_objects(sc);
    }

    bxe_interrupt_detach(sc);

bxe_nic_load_error2:

    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

    sc->port.pmf = 0;

bxe_nic_load_error1:

    /* clear pf_load status, as it was already set */
    if (IS_PF(sc)) {
        bxe_clear_pf_load(sc);
    }

bxe_nic_load_error0:

    bxe_free_fw_stats_mem(sc);
    bxe_free_fp_buffers(sc);
    bxe_free_mem(sc);

    return (rc);
}

/* Locked init path: completes any pending parity recovery for the first
 * function on the engine, then loads the NIC. Returns 0 or an errno. */
static int
bxe_init_locked(struct bxe_softc *sc)
{
    int other_engine = SC_PATH(sc) ? 0 : 1;
    uint8_t other_load_status, load_status;
    uint8_t global = FALSE;
    int rc;

    BXE_CORE_LOCK_ASSERT(sc);

    /* check if the driver is already running */
    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
        return (0);
    }

    if((sc->state == BXE_STATE_ERROR) &&
       (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        BLOGE(sc, "Initialization not done, "
                  "as previous recovery failed."
                  "Reboot/Power-cycle the system\n" );
        return (ENXIO);
    }

    bxe_set_power_state(sc, PCI_PM_D0);

    /*
     * If parity occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
     * loaded on the current engine to complete the recovery. Parity recovery
     * is only relevant for PF driver.
     */
    if (IS_PF(sc)) {
        other_load_status = bxe_get_load_status(sc, other_engine);
        load_status = bxe_get_load_status(sc, SC_PATH(sc));

        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
            bxe_chk_parity_attn(sc, &global, TRUE)) {
            do {
                /*
                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless whether it will be this
                 * function that will complete the recovery or not.
*/
                if (global) {
                    bxe_set_reset_global(sc);
                }

                /*
                 * Only the first function on the current engine should try
                 * to recover in open. In case of attentions in global blocks
                 * only the first in the chip should try to recover.
                 */
                if ((!load_status &&
                     (!global || !other_load_status)) &&
                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
                    BLOGI(sc, "Recovered during init\n");
                    break;
                }

                /* recovery has failed... */
                bxe_set_power_state(sc, PCI_PM_D3hot);
                sc->recovery_state = BXE_RECOVERY_FAILED;

                BLOGE(sc, "Recovery flow hasn't properly "
                          "completed yet, try again later. "
                          "If you still see this message after a "
                          "few retries then power cycle is required.\n");

                rc = ENXIO;
                goto bxe_init_locked_done;
            } while (0);
        }
    }

    sc->recovery_state = BXE_RECOVERY_DONE;

    rc = bxe_nic_load(sc, LOAD_OPEN);

bxe_init_locked_done:

    if (rc) {
        /* Tell the stack the driver is NOT running! */
        BLOGE(sc, "Initialization failed, "
                  "stack notified driver is NOT running!\n");
        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
    }

    return (rc);
}

/* Locked stop path: a thin wrapper over the normal unload. */
static int
bxe_stop_locked(struct bxe_softc *sc)
{
    BXE_CORE_LOCK_ASSERT(sc);
    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
}

/*
 * Handles controller initialization when called from an unlocked routine.
 * ifconfig calls this function.
 *
 * Returns:
 *   void
 */
static void
bxe_init(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;

    BXE_CORE_LOCK(sc);
    bxe_init_locked(sc);
    BXE_CORE_UNLOCK(sc);
}

/* Allocate and configure the ifnet: media, callbacks, capabilities, and
 * ether_ifattach(). Returns 0 or ENXIO on allocation failure. */
static int
bxe_init_ifnet(struct bxe_softc *sc)
{
    if_t ifp;
    int capabilities;

    /* ifconfig entrypoint for media type/status reporting */
    ifmedia_init(&sc->ifmedia, IFM_IMASK,
                 bxe_ifmedia_update,
                 bxe_ifmedia_status);

    /* set the default interface values */
    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));

    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ?
*/
    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);

    /* allocate the ifnet structure */
    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
        BLOGE(sc, "Interface allocation failed!\n");
        return (ENXIO);
    }

    if_setsoftc(ifp, sc);
    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
    if_setioctlfn(ifp, bxe_ioctl);
    if_setstartfn(ifp, bxe_tx_start);
    if_setgetcounterfn(ifp, bxe_get_counter);
    if_settransmitfn(ifp, bxe_tx_mq_start);
    if_setqflushfn(ifp, bxe_mq_flush);
    if_setinitfn(ifp, bxe_init);
    if_setmtu(ifp, sc->mtu);
    if_sethwassist(ifp, (CSUM_IP       |
                         CSUM_TCP      |
                         CSUM_UDP      |
                         CSUM_TSO      |
                         CSUM_TCP_IPV6 |
                         CSUM_UDP_IPV6));

    capabilities =
        (IFCAP_VLAN_MTU       |
         IFCAP_VLAN_HWTAGGING |
         IFCAP_VLAN_HWTSO     |
         IFCAP_VLAN_HWFILTER  |
         IFCAP_VLAN_HWCSUM    |
         IFCAP_HWCSUM         |
         IFCAP_JUMBO_MTU      |
         IFCAP_LRO            |
         IFCAP_TSO4           |
         IFCAP_TSO6           |
         IFCAP_WOL_MAGIC);

    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
    if_setcapenable(ifp, if_getcapabilities(ifp));
    if_setbaudrate(ifp, IF_Gbps(10)); /* XXX */
    if_setsendqlen(ifp, sc->tx_ring_size);
    if_setsendqready(ifp);
    /* XXX */

    sc->ifp = ifp;

    /* attach to the Ethernet interface list */
    ether_ifattach(ifp, sc->link_params.mac_addr);

    /* Attach driver debugnet methods.
*/
    DEBUGNET_SET(ifp, bxe);

    return (0);
}

/* Release every PCI memory BAR previously acquired in bxe_allocate_bars(). */
static void
bxe_deallocate_bars(struct bxe_softc *sc)
{
    int i;

    for (i = 0; i < MAX_BARS; i++) {
        if (sc->bar[i].resource != NULL) {
            bus_release_resource(sc->dev,
                                 SYS_RES_MEMORY,
                                 sc->bar[i].rid,
                                 sc->bar[i].resource);
            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
                  i, PCIR_BAR(i));
        }
    }
}

/* Map the device's memory BARs (0, 2, 4) and cache tag/handle/KVA for
 * each. BAR 0 is mapped shareable. */
static int
bxe_allocate_bars(struct bxe_softc *sc)
{
    u_int flags;
    int i;

    memset(sc->bar, 0, sizeof(sc->bar));

    for (i = 0; i < MAX_BARS; i++) {

        /* memory resources reside at BARs 0, 2, 4 */
        /* Run `pciconf -lb` to see mappings */
        if ((i != 0) && (i != 2) && (i != 4)) {
            continue;
        }

        sc->bar[i].rid = PCIR_BAR(i);

        flags = RF_ACTIVE;
        if (i == 0) {
            flags |= RF_SHAREABLE;
        }

        if ((sc->bar[i].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_MEMORY,
                                    &sc->bar[i].rid,
                                    flags)) == NULL) {
            /* NOTE(review): returning 0 here signals success to the
             * caller even though the BAR mapping failed — this looks
             * like it should be an error return; confirm against the
             * caller's (attach) handling before changing. */
            return (0);
        }

        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);

        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
              i, PCIR_BAR(i),
              rman_get_start(sc->bar[i].resource),
              rman_get_end(sc->bar[i].resource),
              rman_get_size(sc->bar[i].resource),
              (uintmax_t)sc->bar[i].kva);
    }

    return (0);
}

/* Derive relative/absolute function number and path id from the ME
 * register. */
static void
bxe_get_function_num(struct bxe_softc *sc)
{
    uint32_t val = 0;

    /*
     * Read the ME register to get the function number. The ME register
     * holds the relative-function number and absolute-function number. The
     * absolute-function number appears only in E2 and above. Before that
     * these bits always contained zero, therefore we cannot blindly use them.
*/
    val = REG_RD(sc, BAR_ME_REGISTER);

    sc->pfunc_rel =
        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
    sc->path_id =
        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;

    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
    } else {
        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
    }

    BLOGD(sc, DBG_LOAD,
          "Relative function %d, Absolute function %d, Path %d\n",
          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}

/* Return the shmem offset of the multi-function config: the legacy
 * location by default, or the shmem2-advertised address on 57712+. */
static uint32_t
bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
{
    uint32_t shmem2_size;
    uint32_t offset;
    uint32_t mf_cfg_offset_value;

    /* Non 57712 */
    offset = (SHMEM_RD(sc, func_mb) +
              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));

    /* 57712 plus */
    if (sc->devinfo.shmem2_base != 0) {
        shmem2_size = SHMEM2_RD(sc, size);
        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
                offset = mf_cfg_offset_value;
            }
        }
    }

    return (offset);
}

/* Read a register from the PCIe capability block (width bytes at offset
 * reg). Returns 0 if the capability is absent. */
static uint32_t
bxe_pcie_capability_read(struct bxe_softc *sc,
                         int              reg,
                         int              width)
{
    int pcie_reg;

    /* ensure PCIe capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
        if (pcie_reg != 0) {
            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
        }
    }

    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");

    return (0);
}

/* Nonzero while the device still has PCIe transactions pending. */
static uint8_t
bxe_is_pcie_pending(struct bxe_softc *sc)
{
    return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
            PCIEM_STA_TRANSACTION_PND);
}

/*
 * Walk the PCI capabiites list for the device to find what features are
 * supported. These capabilites may be enabled/disabled by firmware so it's
 * best to walk the list rather than make assumptions.
*/
static void
bxe_probe_pci_caps(struct bxe_softc *sc)
{
    uint16_t link_status;
    int reg;

    /* check if PCI Power Management is enabled */
    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
        }
    }

    link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);

    /* handle PCIe 2.0 workarounds for 57710 */
    if (CHIP_IS_E1(sc)) {
        /* workaround for 57710 errata E4_57710_27462 */
        sc->devinfo.pcie_link_speed =
            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;

        /* workaround for 57710 errata E4_57710_27488 */
        sc->devinfo.pcie_link_width =
            ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
        if (sc->devinfo.pcie_link_speed > 1) {
            sc->devinfo.pcie_link_width =
                ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
        }
    } else {
        sc->devinfo.pcie_link_speed =
            (link_status & PCIEM_LINK_STA_SPEED);
        sc->devinfo.pcie_link_width =
            ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
    }

    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);

    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    /* NOTE(review): at this point 'reg' still holds the PM capability
     * offset (or is unset if PM was absent), not the PCIe capability
     * offset — pcie_pcie_cap_reg looks mis-assigned; confirm whether any
     * consumer relies on it before fixing. */
    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;

    /* check if MSI capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
        }
    }

    /* check if MSI-X capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
        }
    }
}

/* Parse switch-dependent (SD) multi-function config: outer VLAN tag and
 * supported protocol. Returns 1 on invalid OVLAN, 0 otherwise. */
static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /* get the outer vlan if we're in switch-dependent mode */

    val = MFCFG_RD(sc,
                   func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    mf_info->ext_id = (uint16_t)val;

    mf_info->multi_vnics_mode = 1;

    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
        return (1);
    }

    /* get the capabilities */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        FUNC_MF_CFG_PROTOCOL_ISCSI) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
               FUNC_MF_CFG_PROTOCOL_FCOE) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    } else {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    }

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}

/*
 * Translate the per-function extended config flags from shmem into the
 * driver's MF_PROTO_SUPPORT_* protocol support mask.
 */
static uint32_t
bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
{
    uint32_t retval = 0;
    uint32_t val;

    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
            retval |= MF_PROTO_SUPPORT_ETHERNET;
        }
        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
            retval |= MF_PROTO_SUPPORT_ISCSI;
        }
        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
            retval |= MF_PROTO_SUPPORT_FCOE;
        }
    }

    return (retval);
}

/*
 * Gather the multi-function configuration for switch-independent (SI)
 * mode. Always returns 0.
 */
static int
bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /*
     * There is no outer vlan if we're in switch-independent mode.
     * If the mac is valid then assume multi-function.
     */

    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);

    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
        2 : 4;

    return (0);
}

/*
 * Gather the multi-function configuration for AFEX (NIV) mode: VIF id,
 * default VLAN, allowed priorities, default CoS, VLAN mode and MBA
 * enablement. Always returns 0.
 */
static int
bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t e1hov_tag;
    uint32_t func_config;
    uint32_t niv_config;

    mf_info->multi_vnics_mode = 1;

    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);

    mf_info->ext_id =
        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);

    mf_info->default_vlan =
        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);

    mf_info->niv_allowed_priorities =
        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);

    mf_info->niv_default_cos =
        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);

    mf_info->afex_vlan_mode =
        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);

    mf_info->niv_mba_enabled =
        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);

    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
        2 : 4;

    return (0);
}

/*
 * Sanity-check the multi-function configuration read from shmem: hidden
 * functions, vnics-per-port vs. multi-vnic consistency and (in SD mode)
 * outer-VLAN validity/uniqueness across all functions on the port.
 * Returns 0 when consistent, 1 on any violation.
 */
static int
bxe_check_valid_mf_cfg(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t mf_cfg1;
    uint32_t mf_cfg2;
    uint32_t ovlan1;
    uint32_t ovlan2;
    uint8_t i, j;

    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
          SC_PORT(sc));
    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
          mf_info->mf_config[SC_VN(sc)]);
    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
          mf_info->multi_vnics_mode);
    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
          mf_info->vnics_per_port);
    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
          mf_info->ext_id);
    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
          mf_info->min_bw[0], mf_info->min_bw[1],
          mf_info->min_bw[2], mf_info->min_bw[3]);
    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
          mf_info->max_bw[0], mf_info->max_bw[1],
          mf_info->max_bw[2], mf_info->max_bw[3]);
    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", sc->mac_addr_str);

    /* various MF mode sanity checks... */

    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
              SC_PORT(sc));
        return (1);
    }

    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
        return (1);
    }

    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
                  SC_VN(sc), OVLAN(sc));
            return (1);
        }

        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
                  mf_info->multi_vnics_mode, OVLAN(sc));
            return (1);
        }

        /*
         * Verify all functions are either MF or SF mode. If MF, make
         * sure that all non-hidden functions have a valid ovlan. If SF,
         * make sure that all non-hidden functions have an invalid ovlan.
         */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
                BLOGE(sc, "mf_mode=SD function %d MF config "
                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
                      i, mf_info->multi_vnics_mode, ovlan1);
                return (1);
            }
        }

        /* Verify all funcs on the same port each have a different ovlan. */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            /* iterate from the next function on the port to the max func */
            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                    VALID_OVLAN(ovlan1) &&
                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
                    VALID_OVLAN(ovlan2) &&
                    (ovlan1 == ovlan2)) {
                    BLOGE(sc, "mf_mode=SD functions %d and %d "
                              "have the same ovlan (%d)\n",
                          i, j, ovlan1);
                    return (1);
                }
            }
        }
    } /* MULTI_FUNCTION_SD */

    return (0);
}

/*
 * Determine the multi-function mode (SD / SI / AFEX / single-function)
 * from shmem, load the per-mode configuration and the per-vnic min/max
 * bandwidth parameters, then validate the result.
 * Returns 0 on success, 1 on an invalid or unknown configuration.
 */
static int
bxe_get_mf_cfg_info(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val, mac_upper;
    uint8_t i, vnic;

    /* initialize mf_info defaults */
    mf_info->vnics_per_port   = 1;
    mf_info->multi_vnics_mode = FALSE;
    mf_info->path_has_ovlan   = FALSE;
    mf_info->mf_mode          = SINGLE_FUNCTION;

    if (!CHIP_IS_MF_CAP(sc)) {
        return (0);
    }

    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
        BLOGE(sc, "Invalid mf_cfg_base!\n");
        return (1);
    }

    /* get the MF mode (switch dependent / independent / single-function) */

    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);

    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) {
    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
        mac_upper =
            MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

        /* check for legal upper mac bytes */
        if (mac_upper != 
            FUNC_MF_CFG_UPPERMAC_DEFAULT) {
            mf_info->mf_mode = MULTI_FUNCTION_SI;
        } else {
            BLOGE(sc, "Invalid config for Switch Independent mode\n");
        }
        break;

    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
        /* get outer vlan configuration */
        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
            mf_info->mf_mode = MULTI_FUNCTION_SD;
        } else {
            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
        }
        break;

    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
        return (0);

    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
        /*
         * Mark MF mode as NIV if MCP version includes NPAR-SD support
         * and the MAC address is valid.
         */
        mac_upper =
            MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
        } else {
            BLOGE(sc, "Invalid config for AFEX mode\n");
        }
        break;

    default:
        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
        return (1);
    }

    /* set path mf_mode (which could be different than function mf_mode) */
    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        mf_info->path_has_ovlan = TRUE;
    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /*
         * Decide on path multi vnics mode. If we're not in MF mode and in
         * 4-port mode, this is good enough to check vnic-0 of the other port
         * on the same path
         */
        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
            uint8_t other_port = !(PORT_ID(sc) & 1);
            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));

            val =
                MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);

            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 
                1 : 0;
        }
    }

    if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /* invalid MF config */
        if (SC_VN(sc) >= 1) {
            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
            return (1);
        }

        return (0);
    }

    /* get the MF configuration */
    mf_info->mf_config[SC_VN(sc)] =
        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);

    switch(mf_info->mf_mode) {
    case MULTI_FUNCTION_SD:
        bxe_get_shmem_mf_cfg_info_sd(sc);
        break;

    case MULTI_FUNCTION_SI:
        bxe_get_shmem_mf_cfg_info_si(sc);
        break;

    case MULTI_FUNCTION_AFEX:
        bxe_get_shmem_mf_cfg_info_niv(sc);
        break;

    default:
        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
              mf_info->mf_mode);
        return (1);
    }

    /* get the congestion management parameters */

    vnic = 0;
    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
        /* get min/max bw */
        val = MFCFG_RD(sc, func_mf_config[i].config);
        mf_info->min_bw[vnic] =
            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
        mf_info->max_bw[vnic] =
            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
        vnic++;
    }

    return (bxe_check_valid_mf_cfg(sc));
}

/*
 * Read the static device/link configuration from shmem: hardware config,
 * link parameters, MF configuration and the port MAC address.
 * Always returns 0 (errors are logged only).
 */
static int
bxe_get_shmem_info(struct bxe_softc *sc)
{
    int port;
    uint32_t mac_hi, mac_lo, val;

    port = SC_PORT(sc);
    mac_hi = mac_lo = 0;

    sc->link_params.sc = sc;
    sc->link_params.port = port;

    /* get the hardware config info */
    sc->devinfo.hw_config  = SHMEM_RD(sc, dev_info.shared_hw_config.config);
    sc->devinfo.hw_config2 = SHMEM_RD(sc, dev_info.shared_hw_config.config2);

    sc->link_params.hw_led_mode =
        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
         SHARED_HW_CFG_LED_MODE_SHIFT);

    /* get the port feature config */
    sc->port.config =
        SHMEM_RD(sc, dev_info.port_feature_config[port].config);

    /* get the link params */
    sc->link_params.speed_cap_mask[0] =
        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
    sc->link_params.speed_cap_mask[1] =
        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);

    /* get the lane config */
    sc->link_params.lane_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);

    /* get the link config */
    val = SHMEM_RD(sc, 
                   dev_info.port_feature_config[port].link_config);
    sc->port.link_config[ELINK_INT_PHY] = val;
    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
    sc->port.link_config[ELINK_EXT_PHY1] =
        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);

    /* get the override preemphasis flag and enable it or turn it off */
    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
    } else {
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
    }

    /* get the initial value of the link params */
    sc->link_params.multi_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);

    /* get external phy info */
    sc->port.ext_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

    /* get the multifunction configuration */
    bxe_get_mf_cfg_info(sc);

    /* get the mac address (from MF config in MF mode, else from shmem) */
    if (IS_MF(sc)) {
        mac_hi =
            MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
        mac_lo =
            MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
    } else {
        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
    }

    if ((mac_lo == 0) && (mac_hi == 0)) {
        *sc->mac_addr_str = 0;
        BLOGE(sc, "No Ethernet address programmed!\n");
    } else {
        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
                 "%02x:%02x:%02x:%02x:%02x:%02x",
                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
        BLOGD(sc, 
              DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
    }

    return (0);
}

/*
 * Validate the user-tunable parameters (loader tunables) and copy the
 * sanitized values into the softc. Out-of-range values are reset to safe
 * defaults with a warning.
 */
static void
bxe_get_tunable_params(struct bxe_softc *sc)
{
    /* sanity checks */

    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
        (bxe_interrupt_mode != INTR_MODE_MSI) &&
        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
        bxe_interrupt_mode = INTR_MODE_MSIX;
    }

    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
        bxe_queue_count = 0;
    }

    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
        if (bxe_max_rx_bufs == 0) {
            bxe_max_rx_bufs = RX_BD_USABLE;
        } else {
            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
            bxe_max_rx_bufs = 2048;
        }
    }

    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
        bxe_hc_rx_ticks = 25;
    }

    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
        bxe_hc_tx_ticks = 50;
    }

    if (bxe_max_aggregation_size == 0) {
        bxe_max_aggregation_size = TPA_AGG_SIZE;
    }

    if (bxe_max_aggregation_size > 0xffff) {
        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
              bxe_max_aggregation_size);
        bxe_max_aggregation_size = TPA_AGG_SIZE;
    }

    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
        bxe_mrrs = -1;
    }

    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
        bxe_autogreeen = 0;
    }

    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
        bxe_udp_rss = 0;
    }

    /* pull in user settings */

    sc->interrupt_mode = bxe_interrupt_mode;
    sc->max_rx_bufs = bxe_max_rx_bufs;
    sc->hc_rx_ticks = bxe_hc_rx_ticks;
    sc->hc_tx_ticks = bxe_hc_tx_ticks;
    sc->max_aggregation_size = bxe_max_aggregation_size;
    sc->mrrs = bxe_mrrs;
    sc->autogreeen = bxe_autogreeen;
    sc->udp_rss = bxe_udp_rss;

    if (bxe_interrupt_mode == INTR_MODE_INTX) {
        sc->num_queues = 1;
    }
    else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
        sc->num_queues =
            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
                MAX_RSS_CHAINS);
        if (sc->num_queues > mp_ncpus) {
            sc->num_queues = mp_ncpus;
        }
    }

    BLOGD(sc, DBG_LOAD,
          "User Config: "
          "debug=0x%lx "
          "interrupt_mode=%d "
          "queue_count=%d "
          "hc_rx_ticks=%d "
          "hc_tx_ticks=%d "
          "rx_budget=%d "
          "max_aggregation_size=%d "
          "mrrs=%d "
          "autogreeen=%d "
          "udp_rss=%d\n",
          bxe_debug,
          sc->interrupt_mode,
          sc->num_queues,
          sc->hc_rx_ticks,
          sc->hc_tx_ticks,
          bxe_rx_budget,
          sc->max_aggregation_size,
          sc->mrrs,
          sc->autogreeen,
          sc->udp_rss);
}

/*
 * Identify the media type of the active phy, set sc->media (ifmedia
 * subtype) accordingly and return the corresponding PORT_* type.
 */
static int
bxe_media_detect(struct bxe_softc *sc)
{
    int port_type;
    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);

    switch (sc->link_params.phy[phy_idx].media_type) {
    case ELINK_ETH_PHY_SFPP_10G_FIBER:
    case ELINK_ETH_PHY_XFP_FIBER:
        BLOGI(sc, "Found 10Gb Fiber media.\n");
        sc->media = IFM_10G_SR;
        port_type = PORT_FIBRE;
        break;
    case ELINK_ETH_PHY_SFP_1G_FIBER:
        BLOGI(sc, "Found 1Gb Fiber media.\n");
        sc->media = IFM_1000_SX;
        port_type = PORT_FIBRE;
        break;
    case ELINK_ETH_PHY_KR:
    case ELINK_ETH_PHY_CX4:
        BLOGI(sc, "Found 10GBase-CX4 media.\n");
        sc->media = IFM_10G_CX4;
        port_type = PORT_FIBRE;
        break;
    case ELINK_ETH_PHY_DA_TWINAX:
        BLOGI(sc, "Found 10Gb Twinax media.\n");
        sc->media = IFM_10G_TWINAX;
        port_type = PORT_DA;
        break;
    case ELINK_ETH_PHY_BASE_T:
        if (sc->link_params.speed_cap_mask[0] &
            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
            BLOGI(sc, "Found 10GBase-T media.\n");
            sc->media = IFM_10G_T;
            port_type = PORT_TP;
        } else {
            BLOGI(sc, "Found 1000Base-T media.\n");
            sc->media = IFM_1000_T;
            port_type = PORT_TP;
        }
        break;
    case ELINK_ETH_PHY_NOT_PRESENT:
        BLOGI(sc, "Media not present.\n");
        sc->media = 0;
        port_type = PORT_OTHER;
        break;
    case ELINK_ETH_PHY_UNSPECIFIED:
    default:
        BLOGI(sc, "Unknown media!\n");
        sc->media = 0;
        port_type = PORT_OTHER;
        break;
    }
    return port_type;
}

/* Extract a bitfield 'fname' (defined by fname_MASK/fname_SHIFT) from 'value'. */
#define GET_FIELD(value, fname) \
    (((value) & (fname##_MASK)) >> (fname##_SHIFT))

#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

/*
 * Discover this PF's IGU status-block layout. In backward-compatible mode
 * the layout is fixed; otherwise the IGU CAM is scanned for entries owned
 * by this PF (vector 0 is the default status block).
 * Returns 0 on success, -1 if no CAM entries were found.
 */
static int
bxe_get_igu_cam_info(struct bxe_softc *sc)
{
    int pfid = SC_FUNC(sc);
    int igu_sb_id;
    uint32_t val;
    uint8_t fid, igu_sb_cnt = 0;

    sc->igu_base_sb = 0xff;

    if (CHIP_INT_MODE_IS_BC(sc)) {
        int vn = SC_VN(sc);

        igu_sb_cnt = sc->igu_sb_cnt;

        sc->igu_base_sb =
            ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * FP_SB_MAX_E1x);

        sc->igu_dsb_id =
            (E1HVN_MAX * FP_SB_MAX_E1x +
             (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));

        return (0);
    }

    /* IGU in normal mode - read CAM */

    for (igu_sb_id = 0;
         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
         igu_sb_id++) {
        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
            continue;
        }
        fid = IGU_FID(val);
        if ((fid & IGU_FID_ENCODE_IS_PF)) {
            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
                continue;
            }
            if (IGU_VEC(val) == 0) {
                /* default status block */
                sc->igu_dsb_id = igu_sb_id;
            } else {
                if (sc->igu_base_sb == 0xff) {
                    sc->igu_base_sb = igu_sb_id;
                }
                igu_sb_cnt++;
            }
        }
    }

    /*
     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
     * that number of CAM entries will not be equal to the value advertised in
     * PCI. Driver should use the minimal value of both as the actual status
     * block count
     */
    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);

    if (igu_sb_cnt == 0) {
        BLOGE(sc, "CAM configuration error\n");
        return (-1);
    }

    return (0);
}

/*
 * Gather various information from the device config space, the device itself,
 * shmem, and the user input.
 */
static int
bxe_get_device_info(struct bxe_softc *sc)
{
    uint32_t val;
    int rc;

    /* Get the data for the device */
    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
    sc->devinfo.device_id    = pci_get_device(sc->dev);
    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);

    /* get the chip revision (chip metal comes from pci config space) */
    sc->devinfo.chip_id =
    sc->link_params.chip_id =
        (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) |
         ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) |
         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) |
         ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0));

    /* force 57811 according to MISC register */
    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
        if (CHIP_IS_57810(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        } else if (CHIP_IS_57810_MF(sc)) {
            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
                                   (sc->devinfo.chip_id & 0x0000ffff));
        }
        sc->devinfo.chip_id |= 0x1;
    }

    BLOGD(sc, DBG_LOAD,
          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
          sc->devinfo.chip_id,
          ((sc->devinfo.chip_id >> 16) & 0xffff),
          ((sc->devinfo.chip_id >> 12) & 0xf),
          ((sc->devinfo.chip_id >> 4) & 0xff),
          ((sc->devinfo.chip_id >> 0) & 0xf));

    val = (REG_RD(sc, 0x2874) & 0x55);
    if ((sc->devinfo.chip_id & 0x1) ||
        (CHIP_IS_E1(sc) && val) ||
        (CHIP_IS_E1H(sc) && (val == 0x55))) {
        sc->flags |= BXE_ONE_PORT_FLAG;
        BLOGD(sc, DBG_LOAD, "single port device\n");
    }

    /* set the doorbell size */
    sc->doorbell_size = (1 << BXE_DB_SHIFT);

    /* determine whether the device is in 2 port or 4 port mode */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
    if (CHIP_IS_E2E3(sc)) {
        /*
         * Read port4mode_en_ovwr[0]:
         * If 1, four port mode is in port4mode_en_ovwr[1].
         * If 0, four port mode is in port4mode_en[0].
*/ val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); if (val & 1) { val = ((val >> 1) & 1); } else { val = REG_RD(sc, MISC_REG_PORT4MODE_EN); } sc->devinfo.chip_port_mode = (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); } /* get the function and path info for the device */ bxe_get_function_num(sc); /* get the shared memory base address */ sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); sc->devinfo.shmem2_base = REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", sc->devinfo.shmem_base, sc->devinfo.shmem2_base); if (!sc->devinfo.shmem_base) { /* this should ONLY prevent upcoming shmem reads */ BLOGI(sc, "MCP not active\n"); sc->flags |= BXE_NO_MCP_FLAG; return (0); } /* make sure the shared memory contents are valid */ val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); return (0); } BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); /* get the bootcode version */ sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); snprintf(sc->devinfo.bc_ver_str, sizeof(sc->devinfo.bc_ver_str), "%d.%d.%d", ((sc->devinfo.bc_ver >> 24) & 0xff), ((sc->devinfo.bc_ver >> 16) & 0xff), ((sc->devinfo.bc_ver >> 8) & 0xff)); BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); /* get the bootcode shmem address */ sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base); /* clean indirect addresses as they're not used */ pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); if (IS_PF(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(sc, 
               PXP2_REG_PGL_ADDR_94_F0, 0);
        if (CHIP_IS_E1x(sc)) {
            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
        }

        /*
         * Enable internal target-read (in case we are probed after PF
         * FLR). Must be done prior to any BAR read access. Only for
         * 57712 and up
         */
        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
        }
    }

    /* get the nvram size */
    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
    sc->devinfo.flash_size =
        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);

    /* get PCI capabilities */
    bxe_probe_pci_caps(sc);

    bxe_set_power_state(sc, PCI_PM_D0);

    /* get various configuration parameters from shmem */
    bxe_get_shmem_info(sc);

    if (sc->devinfo.pcie_msix_cap_reg != 0) {
        val = pci_read_config(sc->dev,
                              (sc->devinfo.pcie_msix_cap_reg +
                               PCIR_MSIX_CTRL),
                              2);
        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
    } else {
        sc->igu_sb_cnt = 1;
    }

    sc->igu_base_addr = BAR_IGU_INTMEM;

    /* initialize IGU parameters */
    if (CHIP_IS_E1x(sc)) {
        sc->devinfo.int_block = INT_BLOCK_HC;
        sc->igu_dsb_id = DEF_SB_IGU_ID;
        sc->igu_base_sb = 0;
    } else {
        sc->devinfo.int_block = INT_BLOCK_IGU;

        /* do not allow device reset during IGU info processing */
        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            int tout = 5000;

            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");

            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);

            /* wait (up to 5s) for the IGU memories to finish resetting */
            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                tout--;
                DELAY(1000);
            }

            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
                return (-1);
            }
        }

        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
        } else {
            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
        }

        rc = bxe_get_igu_cam_info(sc);

        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

        if (rc) {
            return (rc);
        }
    }

    /*
     * Get base FW non-default (fast path) status block ID. This value is
     * used to initialize the fw_sb_id saved on the fp/queue structure to
     * determine the id used by the FW.
     */
    if (CHIP_IS_E1x(sc)) {
        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
    } else {
        /*
         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
         * the same queue are indicated on the same IGU SB). So we prefer
         * FW and IGU SBs to be the same value.
         */
        sc->base_fw_ndsb = sc->igu_base_sb;
    }

    BLOGD(sc, DBG_LOAD,
          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
          sc->igu_dsb_id, sc->igu_base_sb,
          sc->igu_sb_cnt, sc->base_fw_ndsb);

    elink_phy_probe(&sc->link_params);

    return (0);
}

/*
 * Build the per-configuration mask of supported link settings by
 * aggregating the capabilities of all external phys, then mask it with
 * the NVRAM speed capability mask. Also resolves the phy MDIO address.
 */
static void
bxe_link_settings_supported(struct bxe_softc *sc,
                            uint32_t switch_cfg)
{
    uint32_t cfg_size = 0;
    uint32_t idx;
    uint8_t port = SC_PORT(sc);

    /* aggregation of supported attributes of all external phys */
    sc->port.supported[0] = 0;
    sc->port.supported[1] = 0;

    switch (sc->link_params.num_phys) {
    case 1:
        sc->port.supported[0] =
            sc->link_params.phy[ELINK_INT_PHY].supported;
        cfg_size = 1;
        break;
    case 2:
        sc->port.supported[0] =
            sc->link_params.phy[ELINK_EXT_PHY1].supported;
        cfg_size = 1;
        break;
    case 3:
        /* phy order may be swapped by NVRAM config */
        if (sc->link_params.multi_phy_config &
            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
            sc->port.supported[1] =
                sc->link_params.phy[ELINK_EXT_PHY1].supported;
            sc->port.supported[0] =
                sc->link_params.phy[ELINK_EXT_PHY2].supported;
        } else {
            sc->port.supported[0] =
                sc->link_params.phy[ELINK_EXT_PHY1].supported;
            sc->port.supported[1] =
                sc->link_params.phy[ELINK_EXT_PHY2].supported;
        }
        cfg_size = 2;
        break;
    }

    if (!(sc->port.supported[0] || sc->port.supported[1])) {
        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
              SHMEM_RD(sc, 
                       dev_info.port_hw_config[port].external_phy_config),
              SHMEM_RD(sc,
                       dev_info.port_hw_config[port].external_phy_config2));
        return;
    }

    if (CHIP_IS_E3(sc))
        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
    else {
        switch (switch_cfg) {
        case ELINK_SWITCH_CFG_1G:
            sc->port.phy_addr =
                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
            break;
        case ELINK_SWITCH_CFG_10G:
            sc->port.phy_addr =
                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
            break;
        default:
            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
                  sc->port.link_config[0]);
            return;
        }
    }

    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);

    /* mask what we support according to speed_cap_mask per configuration */
    for (idx = 0; idx < cfg_size; idx++) {
        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
        }
    }

    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
          sc->port.supported[0], sc->port.supported[1]);
    ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
                   sc->port.supported[0], sc->port.supported[1]);
}

/*
 * Derive the requested link settings (speed, duplex, flow control and
 * advertised modes) for each phy configuration from the NVRAM
 * link_config words, falling back to autoneg on invalid settings.
 */
static void
bxe_link_settings_requested(struct bxe_softc *sc)
{
    uint32_t link_config;
    uint32_t idx;
    uint32_t cfg_size = 0;

    sc->port.advertising[0] = 0;
    sc->port.advertising[1] = 0;

    switch (sc->link_params.num_phys) {
    case 1:
    case 2:
        cfg_size = 1;
        break;
    case 3:
        cfg_size = 2;
        break;
    }

    for (idx = 0; idx < cfg_size; idx++) {
        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
        link_config = sc->port.link_config[idx];

        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
        case PORT_FEATURE_LINK_SPEED_AUTO:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
                sc->port.advertising[idx] |= sc->port.supported[idx];
                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
                    sc->port.advertising[idx] |=
                        (ELINK_SUPPORTED_100baseT_Half |
                         ELINK_SUPPORTED_100baseT_Full);
            } else {
                /* force 10G, no AN */
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
                sc->port.advertising[idx] |=
                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
                continue;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_10M_FULL:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
                sc->port.advertising[idx] |=
                    (ADVERTISED_10baseT_Full | ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_10M_HALF:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
                sc->port.advertising[idx] |=
                    (ADVERTISED_10baseT_Half | ADVERTISED_TP);
                ELINK_DEBUG_P1(sc,
                    "driver requesting DUPLEX_HALF req_duplex = %x!\n",
                    sc->link_params.req_duplex[idx]);
            } else {
                BLOGE(sc, "Invalid NVRAM config 
link_config=0x%08x " "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_100M_FULL:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
                sc->port.advertising[idx] |=
                    (ADVERTISED_100baseT_Full | ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_100M_HALF:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
                sc->port.advertising[idx] |=
                    (ADVERTISED_100baseT_Half | ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_1G:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
                sc->port.advertising[idx] |=
                    (ADVERTISED_1000baseT_Full | ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_2_5G:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
                sc->port.advertising[idx] |=
                    (ADVERTISED_2500baseX_Full | ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_10G_CX4:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
                sc->port.advertising[idx] |=
                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
            } else {
                BLOGE(sc, "Invalid NVRAM config 
link_config=0x%08x " "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_20G:
            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
            break;

        default:
            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                      "speed_cap_mask=0x%08x\n",
                  link_config, sc->link_params.speed_cap_mask[idx]);
            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
            sc->port.advertising[idx] = sc->port.supported[idx];
            break;
        }

        sc->link_params.req_flow_ctrl[idx] =
            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);

        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
            /* flow control auto requires autoneg support */
            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
            } else {
                bxe_set_requested_fc(sc);
            }
        }

        BLOGD(sc, DBG_LOAD,
              "req_line_speed=%d req_duplex=%d "
              "req_flow_ctrl=0x%x advertising=0x%x\n",
              sc->link_params.req_line_speed[idx],
              sc->link_params.req_duplex[idx],
              sc->link_params.req_flow_ctrl[idx],
              sc->port.advertising[idx]);
        ELINK_DEBUG_P3(sc,
                       "req_line_speed=%d req_duplex=%d "
                       "advertising=0x%x\n",
                       sc->link_params.req_line_speed[idx],
                       sc->link_params.req_duplex[idx],
                       sc->port.advertising[idx]);
    }
}

/*
 * Resolve all phy-related configuration: supported/requested link
 * settings, AutoGrEEEn and EEE modes, and the media type.
 */
static void
bxe_get_phy_info(struct bxe_softc *sc)
{
    uint8_t port = SC_PORT(sc);
    uint32_t config = sc->port.config;
    uint32_t eee_mode;

    /* shmem data already read in bxe_get_shmem_info() */

    ELINK_DEBUG_P3(sc,
                   "lane_config=0x%08x speed_cap_mask0=0x%08x "
                   "link_config0=0x%08x\n",
                   sc->link_params.lane_config,
                   sc->link_params.speed_cap_mask[0],
                   sc->port.link_config[0]);

    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
    bxe_link_settings_requested(sc);

    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 
sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    }

    /* configure link feature according to nvram value */
    eee_mode =
        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                    ELINK_EEE_MODE_ENABLE_LPI |
                                    ELINK_EEE_MODE_OUTPUT_TIME);
    } else {
        sc->link_params.eee_mode = 0;
    }

    /* get the media type */
    bxe_media_detect(sc);
    /*
     * NOTE(review): format string has no conversion specifier for the
     * sc->media argument — presumably meant to be "detected media type %x\n";
     * TODO confirm against the elink debug macro definition.
     */
    ELINK_DEBUG_P1(sc, "detected media type\n", sc->media);
}

/*
 * Gather all driver parameters: user tunables plus fixed ring sizing.
 * WoL is explicitly disabled (see XXX below).
 */
static void
bxe_get_params(struct bxe_softc *sc)
{
    /* get user tunable params */
    bxe_get_tunable_params(sc);

    /* select the RX and TX ring sizes */
    sc->tx_ring_size = TX_BD_USABLE;
    sc->rx_ring_size = RX_BD_USABLE;

    /* XXX disable WoL */
    sc->wol = 0;
}

/*
 * Build the chip "mode" flag bitmap (ASIC/FPGA/emulation, port count,
 * E2/E3 family, MF mode, endianness) consumed by INIT_MODE_FLAGS().
 */
static void
bxe_set_modes_bitmap(struct bxe_softc *sc)
{
    uint32_t flags = 0;

    if (CHIP_REV_IS_FPGA(sc)) {
        SET_FLAGS(flags, MODE_FPGA);
    } else if (CHIP_REV_IS_EMUL(sc)) {
        SET_FLAGS(flags, MODE_EMUL);
    } else {
        SET_FLAGS(flags, MODE_ASIC);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        SET_FLAGS(flags, MODE_PORT4);
    } else {
        SET_FLAGS(flags, MODE_PORT2);
    }

    if (CHIP_IS_E2(sc)) {
        SET_FLAGS(flags, MODE_E2);
    } else if (CHIP_IS_E3(sc)) {
        SET_FLAGS(flags, MODE_E3);
        if (CHIP_REV(sc) == CHIP_REV_Ax) {
            SET_FLAGS(flags, MODE_E3_A0);
        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
        }
    }

    if (IS_MF(sc)) {
        SET_FLAGS(flags, MODE_MF);
        switch (sc->devinfo.mf_info.mf_mode) {
        case MULTI_FUNCTION_SD:
            SET_FLAGS(flags, MODE_MF_SD);
            break;
        case MULTI_FUNCTION_SI:
            SET_FLAGS(flags, MODE_MF_SI);
            break;
        case MULTI_FUNCTION_AFEX:
            SET_FLAGS(flags, MODE_MF_AFEX);
            break;
        }
    } else {
        SET_FLAGS(flags, MODE_SF);
    }

#if defined(__LITTLE_ENDIAN)
    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /* __BIG_ENDIAN */
    SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif

    INIT_MODE_FLAGS(sc) = flags;
}

/*
 * Allocate all host-side interface (HSI) DMA memory: the parent DMA tag,
 * the slowpath blocks, and per-fastpath chains/maps. Returns 0 on success,
 * 1 on failure (with partial unwind of what was already allocated).
 */
static int
bxe_alloc_hsi_mem(struct bxe_softc *sc)
{
    struct
bxe_fastpath *fp;
    bus_addr_t busaddr;
    int max_agg_queues;
    int max_segments;
    bus_size_t max_size;
    bus_size_t max_seg_size;
    char buf[32];
    int rc;
    int i, j;

    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */

    /* allocate the parent bus DMA tag */
    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
                            1,                        /* alignment */
                            0,                        /* boundary limit */
                            BUS_SPACE_MAXADDR,        /* restricted low */
                            BUS_SPACE_MAXADDR,        /* restricted hi */
                            NULL,                     /* addr filter() */
                            NULL,                     /* addr filter() arg */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
                            0,                        /* flags */
                            NULL,                     /* lock() */
                            NULL,                     /* lock() arg */
                            &sc->parent_dma_tag);     /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
        return (1);
    }

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
                      &sc->def_sb_dma, "default status block") != 0) {
        /* XXX */
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;

    /***************/
    /* EVENT QUEUE */
    /***************/

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
                      &sc->eq_dma, "event queue") != 0) {
        /* XXX */
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;

    /*************/
    /* SLOW PATH */
    /*************/

    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
                      &sc->sp_dma, "slow path") != 0) {
        /* XXX */
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
                      &sc->spq_dma, "slow path queue") != 0) {
        /* XXX */
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;

    /***************************/
    /* FW DECOMPRESSION BUFFER */
    /***************************/

    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
                      "fw decompression buffer") != 0) {
        /* XXX */
        bxe_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL;
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;

    if ((sc->gz_strm =
         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
        /* XXX */
        bxe_dma_free(sc, &sc->gz_buf_dma);
        sc->gz_buf = NULL;
        bxe_dma_free(sc, &sc->spq_dma);
        sc->spq = NULL;
        bxe_dma_free(sc, &sc->sp_dma);
        sc->sp = NULL;
        bxe_dma_free(sc, &sc->eq_dma);
        sc->eq = NULL;
        bxe_dma_free(sc, &sc->def_sb_dma);
        sc->def_sb = NULL;
        bus_dma_tag_destroy(sc->parent_dma_tag);
        return (1);
    }

    /*************/
    /* FASTPATHS */
    /*************/

    /* allocate DMA memory for each fastpath structure */
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        fp->sc    = sc;
        fp->index = i;

        /*******************/
        /* FP STATUS BLOCK */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d status block", i);
        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
                          &fp->sb_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            if (CHIP_IS_E2E3(sc)) {
                fp->status_block.e2_sb =
                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
            } else {
                fp->status_block.e1x_sb =
                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
            }
        }

        /******************/
        /* FP TX BD CHAIN */
        /******************/

        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
                          &fp->tx_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
        }

        /* link together the tx bd chain pages */
        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
            /* index into the tx bd chain array to last entry per page */
            struct eth_tx_next_bd *tx_next_bd =
                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
            /* point to the next page and wrap from last page */
            busaddr = (fp->tx_dma.paddr +
                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
        }

        /******************/
        /* FP RX BD CHAIN */
        /******************/

        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
                          &fp->rx_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
        }

        /* link together the rx bd chain pages */
        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
            /* index into the rx bd chain array to last entry per page */
            struct eth_rx_bd *rx_bd =
                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rx_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
            rx_bd->addr_hi = htole32(U64_HI(busaddr));
            rx_bd->addr_lo = htole32(U64_LO(busaddr));
        }

        /*******************/
        /* FP RX RCQ CHAIN */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
                          &fp->rcq_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
        }

        /* link together the rcq chain pages */
        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
            /* index into the rcq chain array to last entry per page */
            struct eth_rx_cqe_next_page *rx_cqe_next =
                (struct eth_rx_cqe_next_page *)
                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rcq_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
        }

        /*******************/
        /* FP RX SGE CHAIN */
        /*******************/

        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
                          &fp->rx_sge_dma, buf) != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to alloc %s\n", buf);
            return (1);
        } else {
            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
        }

        /* link together the sge chain pages */
        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
            /* index into the rcq chain array to last entry per page */
            struct eth_rx_sge *rx_sge =
                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
            /* point to the next page and wrap from last page */
            busaddr = (fp->rx_sge_dma.paddr +
                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
            rx_sge->addr_hi = htole32(U64_HI(busaddr));
            rx_sge->addr_lo = htole32(U64_LO(busaddr));
        }

        /***********************/
        /* FP TX MBUF DMA MAPS */
        /***********************/

        /* set required sizes before mapping to conserve resources */
        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
            max_size     = BXE_TSO_MAX_SIZE;
            max_segments = BXE_TSO_MAX_SEGMENTS;
            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
        } else {
            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
            max_segments = BXE_MAX_SEGMENTS;
            max_seg_size = MCLBYTES;
        }

        /* create a dma tag for the tx mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                max_size,           /* max map size */
                                max_segments,       /* num discontinuous */
                                max_seg_size,       /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->tx_mbuf_tag);  /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d tx mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for each of the tx mbuf clusters */
        for (j = 0; j < TX_BD_TOTAL; j++) {
            if (bus_dmamap_create(fp->tx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->tx_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /***********************/
        /* FP RX MBUF DMA MAPS */
        /***********************/

        /* create a dma tag for the rx mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                MJUM9BYTES,         /* max map size */
                                1,                  /* num discontinuous */
                                MJUM9BYTES,         /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->rx_mbuf_tag);  /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d rx mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for each of the rx mbuf clusters */
        for (j = 0; j < RX_BD_TOTAL; j++) {
            if (bus_dmamap_create(fp->rx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->rx_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /* create dma map for the spare rx mbuf cluster */
        if (bus_dmamap_create(fp->rx_mbuf_tag,
                              BUS_DMA_NOWAIT,
                              &fp->rx_mbuf_spare_map)) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma map for "
                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
            return (1);
        }

        /***************************/
        /* FP RX SGE MBUF DMA MAPS */
        /***************************/

        /* create a dma tag for the rx sge mbufs */
        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                                1,                  /* alignment */
                                0,                  /* boundary limit */
                                BUS_SPACE_MAXADDR,  /* restricted low */
                                BUS_SPACE_MAXADDR,  /* restricted hi */
                                NULL,               /* addr filter() */
                                NULL,               /* addr filter() arg */
                                BCM_PAGE_SIZE,      /* max map size */
                                1,                  /* num discontinuous */
                                BCM_PAGE_SIZE,      /* max seg size */
                                0,                  /* flags */
                                NULL,               /* lock() */
                                NULL,               /* lock() arg */
                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
        if (rc != 0) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma tag for "
                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
            return (1);
        }

        /* create dma maps for the rx sge mbuf clusters */
        for (j = 0; j < RX_SGE_TOTAL; j++) {
            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /* create dma map for the spare rx sge mbuf cluster */
        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
                              BUS_DMA_NOWAIT,
                              &fp->rx_sge_mbuf_spare_map)) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma map for "
                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
            return (1);
        }

        /***************************/
        /* FP RX TPA MBUF DMA MAPS */
        /***************************/

        /* create dma maps for the rx tpa mbuf clusters */
        max_agg_queues = MAX_AGG_QS(sc);

        for (j = 0; j < max_agg_queues; j++) {
            if (bus_dmamap_create(fp->rx_mbuf_tag,
                                  BUS_DMA_NOWAIT,
                                  &fp->rx_tpa_info[j].bd.m_map)) {
                /* XXX unwind and free previous fastpath allocations */
                BLOGE(sc, "Failed to create dma map for "
                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
                return (1);
            }
        }

        /* create dma map for the spare rx tpa mbuf cluster */
        if (bus_dmamap_create(fp->rx_mbuf_tag,
                              BUS_DMA_NOWAIT,
                              &fp->rx_tpa_info_mbuf_spare_map)) {
            /* XXX unwind and free previous fastpath allocations */
            BLOGE(sc, "Failed to create dma map for "
                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
            return (1);
        }

        bxe_init_sge_ring_bit_mask(fp);
    }

    return (0);
}

static
void
bxe_free_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int max_agg_queues;
    int i, j;

    if (sc->parent_dma_tag == NULL) {
        return; /* assume nothing was allocated */
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        /*******************/
        /* FP STATUS BLOCK */
        /*******************/

        bxe_dma_free(sc, &fp->sb_dma);
        memset(&fp->status_block, 0, sizeof(fp->status_block));

        /******************/
        /* FP TX BD CHAIN */
        /******************/

        bxe_dma_free(sc, &fp->tx_dma);
        fp->tx_chain = NULL;

        /******************/
        /* FP RX BD CHAIN */
        /******************/

        bxe_dma_free(sc, &fp->rx_dma);
        fp->rx_chain = NULL;

        /*******************/
        /* FP RX RCQ CHAIN */
        /*******************/

        bxe_dma_free(sc, &fp->rcq_dma);
        fp->rcq_chain = NULL;

        /*******************/
        /* FP RX SGE CHAIN */
        /*******************/

        bxe_dma_free(sc, &fp->rx_sge_dma);
        fp->rx_sge_chain = NULL;

        /***********************/
        /* FP TX MBUF DMA MAPS */
        /***********************/

        if (fp->tx_mbuf_tag != NULL) {
            for (j = 0; j < TX_BD_TOTAL; j++) {
                if (fp->tx_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->tx_mbuf_tag,
                                      fp->tx_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->tx_mbuf_tag,
                                       fp->tx_mbuf_chain[j].m_map);
                }
            }

            bus_dma_tag_destroy(fp->tx_mbuf_tag);
            fp->tx_mbuf_tag = NULL;
        }

        /***********************/
        /* FP RX MBUF DMA MAPS */
        /***********************/

        if (fp->rx_mbuf_tag != NULL) {
            for (j = 0; j < RX_BD_TOTAL; j++) {
                if (fp->rx_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->rx_mbuf_tag,
                                      fp->rx_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->rx_mbuf_tag,
                                       fp->rx_mbuf_chain[j].m_map);
                }
            }

            if (fp->rx_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
            }

            /***************************/
            /* FP RX TPA MBUF DMA MAPS */
            /***************************/

            /* TPA maps share the rx mbuf tag, so they are torn down here */
            max_agg_queues = MAX_AGG_QS(sc);

            for (j = 0; j < max_agg_queues; j++) {
                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
                    bus_dmamap_unload(fp->rx_mbuf_tag,
                                      fp->rx_tpa_info[j].bd.m_map);
                    bus_dmamap_destroy(fp->rx_mbuf_tag,
                                       fp->rx_tpa_info[j].bd.m_map);
                }
            }

            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag,
                                  fp->rx_tpa_info_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag,
                                   fp->rx_tpa_info_mbuf_spare_map);
            }

            bus_dma_tag_destroy(fp->rx_mbuf_tag);
            fp->rx_mbuf_tag = NULL;
        }

        /***************************/
        /* FP RX SGE MBUF DMA MAPS */
        /***************************/

        if (fp->rx_sge_mbuf_tag != NULL) {
            for (j = 0; j < RX_SGE_TOTAL; j++) {
                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                      fp->rx_sge_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                       fp->rx_sge_mbuf_chain[j].m_map);
                }
            }

            if (fp->rx_sge_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                  fp->rx_sge_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                   fp->rx_sge_mbuf_spare_map);
            }

            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
            fp->rx_sge_mbuf_tag = NULL;
        }
    }

    /***************************/
    /* FW DECOMPRESSION BUFFER */
    /***************************/

    bxe_dma_free(sc, &sc->gz_buf_dma);
    sc->gz_buf = NULL;
    free(sc->gz_strm, M_DEVBUF);
    sc->gz_strm = NULL;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    bxe_dma_free(sc, &sc->spq_dma);
    sc->spq = NULL;

    /*************/
    /* SLOW PATH */
    /*************/

    bxe_dma_free(sc, &sc->sp_dma);
    sc->sp = NULL;

    /***************/
    /* EVENT QUEUE */
    /***************/

    bxe_dma_free(sc, &sc->eq_dma);
    sc->eq = NULL;

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    bxe_dma_free(sc, &sc->def_sb_dma);
    sc->def_sb = NULL;

    bus_dma_tag_destroy(sc->parent_dma_tag);
    sc->parent_dma_tag = NULL;
}

/*
 * Previous driver DMAE transaction may have occurred when pre-boot stage
 * ended and boot began. This would invalidate the addresses of the
 * transaction, resulting in was-error bit set in the PCI causing all
 * hw-to-host PCIe transactions to timeout.
 If this happened we want to clear
 * the interrupt which detected this from the pglueb and the was-done bit
 */
static void
bxe_prev_interrupted_dmae(struct bxe_softc *sc)
{
    uint32_t val;

    if (!CHIP_IS_E1x(sc)) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
            BLOGD(sc, DBG_LOAD,
                  "Clearing 'was-error' bit that was set in pglueb");
            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
        }
    }
}

/*
 * Tell the MCP the previous-driver unload has completed (skipping link
 * reset). Returns 0 on an MCP ack, -1 if the MCP did not respond.
 */
static int
bxe_prev_mcp_done(struct bxe_softc *sc)
{
    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
    if (!rc) {
        BLOGE(sc, "MCP response failure, aborting\n");
        return (-1);
    }

    return (0);
}

/*
 * Look up this device's bus/slot/path in the global previous-unload list.
 * Caller is expected to hold bxe_prev_mtx (callers below lock it).
 */
static struct bxe_prev_list_node *
bxe_prev_path_get_entry(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *tmp;

    LIST_FOREACH(tmp, &bxe_prev_list, node) {
        if ((sc->pcie_bus == tmp->bus) &&
            (sc->pcie_device == tmp->slot) &&
            (SC_PATH(sc) == tmp->path)) {
            return (tmp);
        }
    }

    return (NULL);
}

/*
 * Return TRUE if this path was already cleaned by a previous driver
 * unload (and was not merely marked by AER), FALSE otherwise.
 */
static uint8_t
bxe_prev_is_path_marked(struct bxe_softc *sc)
{
    struct bxe_prev_list_node *tmp;
    int rc = FALSE;

    mtx_lock(&bxe_prev_mtx);

    tmp = bxe_prev_path_get_entry(sc);
    if (tmp) {
        if (tmp->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Path %d/%d/%d was marked by AER\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            rc = TRUE;
            BLOGD(sc, DBG_LOAD,
                  "Path %d/%d/%d was already cleaned from previous drivers\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        }
    }

    mtx_unlock(&bxe_prev_mtx);

    return (rc);
}

/*
 * Record that the previous unload finished for this path. If an entry
 * already exists, just clear/refresh its AER state; otherwise allocate
 * and insert a new list node. 'after_undi' records the UNDI port bit.
 */
static int
bxe_prev_mark_path(struct bxe_softc *sc,
                   uint8_t         after_undi)
{
    struct bxe_prev_list_node *tmp;

    mtx_lock(&bxe_prev_mtx);

    /* Check whether the entry for this path already exists */
    tmp = bxe_prev_path_get_entry(sc);
    if (tmp) {
        if (!tmp->aer) {
            BLOGD(sc, DBG_LOAD,
                  "Re-marking AER in path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
        } else {
            BLOGD(sc, DBG_LOAD,
                  "Removing AER indication from path %d/%d/%d\n",
                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
            tmp->aer = 0;
        }

        mtx_unlock(&bxe_prev_mtx);
        return (0);
    }

    mtx_unlock(&bxe_prev_mtx);

    /* Create an entry for this path and add it */
    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
                 (M_NOWAIT | M_ZERO));
    if (!tmp) {
        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
        return (-1);
    }

    tmp->bus  = sc->pcie_bus;
    tmp->slot = sc->pcie_device;
    tmp->path = SC_PATH(sc);
    tmp->aer  = 0;
    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;

    mtx_lock(&bxe_prev_mtx);

    BLOGD(sc, DBG_LOAD,
          "Marked path %d/%d/%d - finished previous unload\n",
          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);

    mtx_unlock(&bxe_prev_mtx);

    return (0);
}

/*
 * Issue a PCIe Function Level Reset through the MCP. Only supported on
 * E2+ silicon with new-enough bootcode. Waits (with exponential backoff)
 * for pending PCIe transactions to drain first, but proceeds regardless.
 * Returns 0 when the FLR was initiated, -1 when FLR is unsupported.
 */
static int
bxe_do_flr(struct bxe_softc *sc)
{
    int i;

    /* only E2 and onwards support FLR */
    if (CHIP_IS_E1x(sc)) {
        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
        return (-1);
    }

    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
              sc->devinfo.bc_ver);
        return (-1);
    }

    /* Wait for Transaction Pending bit clean */
    for (i = 0; i < 4; i++) {
        if (i) {
            /* backoff: 100ms, 200ms, 400ms */
            DELAY(((1 << (i - 1)) * 100) * 1000);
        }

        if (!bxe_is_pcie_pending(sc)) {
            goto clear;
        }
    }

    BLOGE(sc, "PCIE transaction is not cleared, "
              "proceeding with reset anyway\n");

clear:
    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);

    return (0);
}

/*
 * Saved MAC register addresses/values so bxe_prev_unload_common() can
 * restore them after the common reset. An address of 0 means "unused".
 */
struct bxe_mac_vals {
    uint32_t xmac_addr;
    uint32_t xmac_val;
    uint32_t emac_addr;
    uint32_t emac_val;
    uint32_t umac_addr;
    uint32_t umac_val;
    uint32_t bmac_addr;
    uint32_t bmac_val[2];
};

/*
 * Disable Rx on whichever MAC blocks (BMAC/EMAC on pre-E3, XMAC/UMAC on
 * E3) are out of reset, saving the original register values in 'vals'
 * so the caller can restore them later.
 */
static void
bxe_prev_unload_close_mac(struct bxe_softc *sc,
                          struct bxe_mac_vals *vals)
{
    uint32_t val, base_addr, offset, mask, reset_reg;
    uint8_t mac_stopped = FALSE;
    uint8_t port = SC_PORT(sc);
    uint32_t wb_data[2];

    /* reset addresses as they also mark which values were changed */
    vals->bmac_addr = 0;
    vals->umac_addr = 0;
    vals->xmac_addr = 0;
    vals->emac_addr = 0;

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);

    if
 (!CHIP_IS_E3(sc)) {
        /* pre-E3: BMAC + EMAC */
        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
        if ((mask & reset_reg) && val) {
            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
                                    : NIG_REG_INGRESS_BMAC0_MEM;
            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
                                    : BIGMAC_REGISTER_BMAC_CONTROL;

            /*
             * use rd/wr since we cannot use dmae. This is safe
             * since MCP won't access the bus due to the request
             * to unload, and no function on the path can be
             * loaded at this time.
             */
            wb_data[0] = REG_RD(sc, base_addr + offset);
            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
            vals->bmac_addr = base_addr + offset;
            vals->bmac_val[0] = wb_data[0];
            vals->bmac_val[1] = wb_data[1];
            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
            REG_WR(sc, vals->bmac_addr, wb_data[0]);
            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
        }

        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
        vals->emac_val = REG_RD(sc, vals->emac_addr);
        REG_WR(sc, vals->emac_addr, 0);
        mac_stopped = TRUE;
    } else {
        /* E3: XMAC + UMAC */
        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
            /* pulse bit 1 low then high (PFC control) before stopping XMAC */
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
            REG_WR(sc, vals->xmac_addr, 0);
            mac_stopped = TRUE;
        }

        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
        if (mask & reset_reg) {
            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
            base_addr = SC_PORT(sc) ?
 GRCBASE_UMAC1 : GRCBASE_UMAC0;
            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
            vals->umac_val = REG_RD(sc, vals->umac_addr);
            REG_WR(sc, vals->umac_addr, 0);
            mac_stopped = TRUE;
        }
    }

    if (mac_stopped) {
        /* let in-flight Rx drain after stopping the MAC(s) */
        DELAY(20000);
    }
}

/* UNDI producer location/encoding in TSTRORM internal memory */
#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))

/*
 * Advance the UNDI driver's RCQ and BD producers by 'inc' for the given
 * port, so the (still resident) UNDI state keeps pace while we drain BRB.
 */
static void
bxe_prev_unload_undi_inc(struct bxe_softc *sc,
                         uint8_t          port,
                         uint8_t          inc)
{
    uint16_t rcq, bd;
    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));

    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;

    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);

    BLOGD(sc, DBG_LOAD,
          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
          port, bd, rcq);
}

/*
 * "Common" previous-unload flow: close the MACs, drain the BRB (manually
 * pumping the UNDI producers when an UNDI driver was detected), perform
 * the common reset, restore the saved MAC registers, then mark the path
 * and complete the MCP handshake.
 */
static int
bxe_prev_unload_common(struct bxe_softc *sc)
{
    uint32_t reset_reg, tmp_reg = 0, rc;
    uint8_t prev_undi = FALSE;
    struct bxe_mac_vals mac_vals;
    uint32_t timer_count = 1000;
    uint32_t prev_brb;

    /*
     * It is possible a previous function received 'common' answer,
     * but hasn't loaded yet, therefore creating a scenario of
     * multiple functions receiving 'common' on the same path.
     */
    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");

    memset(&mac_vals, 0, sizeof(mac_vals));

    if (bxe_prev_is_path_marked(sc)) {
        return (bxe_prev_mcp_done(sc));
    }

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);

    /* Reset should be performed after BRB is emptied */
    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
        /* Close the MAC Rx to prevent BRB from filling up */
        bxe_prev_unload_close_mac(sc, &mac_vals);

        /* close LLH filters towards the BRB */
        elink_set_rx_filter(&sc->link_params, 0);

        /*
         * Check if the UNDI driver was previously loaded.
         * UNDI driver initializes CID offset for normal bell to 0x7
         */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
            if (tmp_reg == 0x7) {
                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
                prev_undi = TRUE;
                /* clear the UNDI indication */
                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
                /* clear possible idle check errors */
                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
            }
        }

        /* wait until BRB is empty */
        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
        while (timer_count) {
            prev_brb = tmp_reg;

            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
            if (!tmp_reg) {
                break;
            }

            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);

            /* reset timer as long as BRB actually gets emptied */
            if (prev_brb > tmp_reg) {
                timer_count = 1000;
            } else {
                timer_count--;
            }

            /* If UNDI resides in memory, manually increment it */
            if (prev_undi) {
                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
            }

            DELAY(10);
        }

        if (!timer_count) {
            BLOGE(sc, "Failed to empty BRB\n");
        }
    }

    /* No packets are in the pipeline, path is ready for reset */
    bxe_reset_common(sc);

    /* restore MAC registers saved by bxe_prev_unload_close_mac() */
    if (mac_vals.xmac_addr) {
        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
    }
    if (mac_vals.umac_addr) {
        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
    }
    if (mac_vals.emac_addr) {
        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
    }
    if (mac_vals.bmac_addr) {
        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
    }

    rc = bxe_prev_mark_path(sc, prev_undi);
    if (rc) {
        bxe_prev_mcp_done(sc);
        return (rc);
    }

    return (bxe_prev_mcp_done(sc));
}

/*
 * "Uncommon" previous-unload flow: try an FLR when the resident firmware
 * matches ours; otherwise complete the MCP handshake and ask the caller
 * to wait (BXE_PREV_WAIT_NEEDED) and retry.
 */
static int
bxe_prev_unload_uncommon(struct bxe_softc *sc)
{
    int rc;

    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");

    /* Test if previous unload process was already finished for this path */
    if (bxe_prev_is_path_marked(sc)) {
        return (bxe_prev_mcp_done(sc));
    }

    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");

    /*
     * If function has FLR capabilities, and existing FW version matches
     * the one required, then FLR will be sufficient to clean any
residue * left by previous driver */ rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); if (!rc) { /* fw version is good */ BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); rc = bxe_do_flr(sc); } if (!rc) { /* FLR was performed */ BLOGD(sc, DBG_LOAD, "FLR successful\n"); return (0); } BLOGD(sc, DBG_LOAD, "Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bxe_prev_mcp_done(sc); if (!rc) { rc = BXE_PREV_WAIT_NEEDED; } return (rc); } static int bxe_prev_unload(struct bxe_softc *sc) { int time_counter = 10; uint32_t fw, hw_lock_reg, hw_lock_val; uint32_t rc = 0; /* * Clear HW from errors which may have resulted from an interrupted * DMAE transaction. */ bxe_prev_interrupted_dmae(sc); /* Release previously held locks */ hw_lock_reg = (SC_FUNC(sc) <= 5) ? (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); hw_lock_val = (REG_RD(sc, hw_lock_reg)); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); } BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); REG_WR(sc, hw_lock_reg, 0xffffffff); } else { BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); } if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); } do { /* Lock MCP using an unload request */ fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); if (!fw) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; break; } if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { rc = bxe_prev_unload_common(sc); break; } /* non-common reply from MCP night require looping */ rc = bxe_prev_unload_uncommon(sc); if (rc != BXE_PREV_WAIT_NEEDED) { break; } DELAY(20000); } while (--time_counter); if (!time_counter || rc) { BLOGE(sc, "Failed to unload 
previous driver!" " time_counter %d rc %d\n", time_counter, rc); rc = -1; } return (rc); } void bxe_dcbx_set_state(struct bxe_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) { if (!CHIP_IS_E1x(sc)) { sc->dcb_state = dcb_on; sc->dcbx_enabled = dcbx_enabled; } else { sc->dcb_state = FALSE; sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; } BLOGD(sc, DBG_LOAD, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? "on-chip with negotiation" : "invalid"); } /* must be called after sriov-enable */ static int bxe_set_qm_cid_count(struct bxe_softc *sc) { int cid_count = BXE_L2_MAX_CID(sc); if (IS_SRIOV(sc)) { cid_count += BXE_VF_CIDS; } if (CNIC_SUPPORT(sc)) { cid_count += CNIC_CID_MAX; } return (roundup(cid_count, QM_CID_ROUND)); } static void bxe_init_multi_cos(struct bxe_softc *sc) { int pri, cos; uint32_t pri_map = 0; /* XXX change to user config */ for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); if (cos < sc->max_cos) { sc->prio_to_cos[pri] = cos; } else { BLOGW(sc, "Invalid COS %d for priority %d " "(max COS is %d), setting to 0\n", cos, pri, (sc->max_cos - 1)); sc->prio_to_cos[pri] = 0; } } } static int bxe_sysctl_state(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc; int error, result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) { return (error); } if (result == 1) { uint32_t temp; sc = (struct bxe_softc *)arg1; BLOGI(sc, "... 
dumping driver state ...\n"); temp = SHMEM2_RD(sc, temperature_in_half_celsius); BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); } return (error); } static int bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; uint32_t *offset; uint64_t value = 0; int index = (int)arg2; if (index >= BXE_NUM_ETH_STATS) { BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_stats_arr[index].offset); switch (bxe_eth_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", index, bxe_eth_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static int bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats; uint32_t *offset; uint64_t value = 0; uint32_t q_stat = (uint32_t)arg2; uint32_t fp_index = ((q_stat >> 16) & 0xffff); uint32_t index = (q_stat & 0xffff); eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; if (index >= BXE_NUM_ETH_Q_STATS) { BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); switch (bxe_eth_q_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", index, bxe_eth_q_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static void bxe_force_link_reset(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); elink_link_reset(&sc->link_params, &sc->link_vars, 1); bxe_release_phy_lock(sc); } static int bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS) { - struct bxe_softc *sc = (struct bxe_softc *)arg1;; + struct bxe_softc *sc = (struct 
bxe_softc *)arg1; uint32_t cfg_idx = bxe_get_link_cfg_idx(sc); int rc = 0; int error; int result; error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req); if (error || !req->newptr) { return (error); } if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) { BLOGW(sc, "invalid pause param (%d) - use intergers between 1 & 8\n",sc->bxe_pause_param); sc->bxe_pause_param = 8; } result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT); if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) { BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param); return -EINVAL; } if(IS_MF(sc)) return 0; sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; if(result & ELINK_FLOW_CTRL_RX) sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX; if(result & ELINK_FLOW_CTRL_TX) sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX; if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO) sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE; if(result & 0x400) { if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) { sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; } sc->link_params.req_fc_auto_adv = 0; if (result & ELINK_FLOW_CTRL_RX) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX; if (result & ELINK_FLOW_CTRL_TX) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX; if (!sc->link_params.req_fc_auto_adv) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE; } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_STOP); } if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_force_link_reset(sc); bxe_acquire_phy_lock(sc); rc = elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); } } return rc; } static void bxe_add_sysctls(struct bxe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *queue_top, *queue; struct sysctl_oid_list 
*queue_top_children, *queue_children; char queue_num_buf[32]; uint32_t q_stat; int i, j; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", CTLFLAG_RD, BXE_DRIVER_VERSION, 0, "version"); snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : "Unknown")); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, "multifunction vnics per port"); snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : "???GT/s"), sc->devinfo.pcie_link_width); sc->debug = bxe_debug; SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, "bootcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, sc->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, sc->mac_addr_str, 0, "mac address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, sc->pci_link_str, 0, "pci link status"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, "debug logging mode"); sc->trigger_grcdump = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", CTLFLAG_RW, &sc->trigger_grcdump, 0, "trigger grcdump should be invoked" " before collecting grcdump"); sc->grcdump_started = 0; sc->grcdump_done = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", CTLFLAG_RD, &sc->grcdump_done, 0, "set by driver when grcdump is done"); sc->rx_budget = bxe_rx_budget; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", CTLFLAG_RW, &sc->rx_budget, 0, "rx processing budget"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, bxe_sysctl_pauseparam, "IU", "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, bxe_sysctl_state, "IU", "dump driver state"); for (i = 0; i < BXE_NUM_ETH_STATS; i++) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, bxe_eth_stats_arr[i].string, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string); } /* add a new parent node for all queues "dev.bxe.#.queue" */ queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue"); 
queue_top_children = SYSCTL_CHILDREN(queue_top); for (i = 0; i < sc->num_queues; i++) { /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue"); queue_children = SYSCTL_CHILDREN(queue); for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { q_stat = ((i << 16) | j); SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, bxe_eth_q_stats_arr[j].string, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, q_stat, bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string); } } } static int bxe_alloc_buf_rings(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) return (-1); } return (0); } static void bxe_free_buf_rings(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tx_br) { buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } } } static void bxe_init_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "bxe%d_fp%d_tx_lock", sc->unit, i); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), "bxe%d_fp%d_rx_lock", sc->unit, i); mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); } } static void bxe_destroy_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (mtx_initialized(&fp->tx_mtx)) { mtx_destroy(&fp->tx_mtx); } if (mtx_initialized(&fp->rx_mtx)) { mtx_destroy(&fp->rx_mtx); } } } /* * Device attach function. * * Allocates device resources, performs secondary chip identification, and * initializes driver instance variables. 
This function is called from driver * load after a successful probe. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_attach(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting attach...\n"); sc->state = BXE_STATE_CLOSED; sc->dev = dev; sc->unit = device_get_unit(dev); BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); sc->pcie_bus = pci_get_bus(dev); sc->pcie_device = pci_get_slot(dev); sc->pcie_func = pci_get_function(dev); /* enable bus master capability */ pci_enable_busmaster(dev); /* get the BARs */ if (bxe_allocate_bars(sc) != 0) { return (ENXIO); } /* initialize the mutexes */ bxe_init_mutexes(sc); /* prepare the periodic callout */ callout_init(&sc->periodic_callout, 0); /* prepare the chip taskqueue */ sc->chip_tq_flags = CHIP_TQ_NONE; snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), "bxe%d_chip_tq", sc->unit); TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->chip_tq); taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ "%s", sc->chip_tq_name); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc); /* get device info and set params */ if (bxe_get_device_info(sc) != 0) { BLOGE(sc, "getting device info\n"); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* get final misc params */ bxe_get_params(sc); /* set the default MTU (changed via ifconfig) */ sc->mtu = ETHERMTU; bxe_set_modes_bitmap(sc); /* XXX * If in AFEX mode and the function is configured for FCoE * then bail... no L2 allowed. 
*/ /* get phy settings from shmem and 'and' against admin settings */ bxe_get_phy_info(sc); /* initialize the FreeBSD ifnet interface */ if (bxe_init_ifnet(sc) != 0) { bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } if (bxe_add_cdev(sc) != 0) { if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate device interrupts */ if (bxe_interrupt_alloc(sc) != 0) { bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } bxe_init_fp_mutexs(sc); if (bxe_alloc_buf_rings(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate ilt */ if (bxe_alloc_ilt_mem(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate the host hardware/software hsi structures */ if (bxe_alloc_hsi_mem(sc) != 0) { bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* need to reset chip if UNDI was active */ if (IS_PF(sc) && !BXE_NOMCP(sc)) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); bxe_prev_unload(sc); } #if 1 /* XXX */ 
bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); #else if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && SHMEM2_RD(sc, dcbx_lldp_params_offset) && SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); bxe_dcbx_init_params(sc); } else { bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); } #endif /* calculate qm_cid_count */ sc->qm_cid_count = bxe_set_qm_cid_count(sc); BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); sc->max_cos = 1; bxe_init_multi_cos(sc); bxe_add_sysctls(sc); return (0); } /* * Device detach function. * * Stops the controller, resets the controller, and releases resources. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_detach(device_t dev) { struct bxe_softc *sc; if_t ifp; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting detach...\n"); ifp = sc->ifp; if (ifp != NULL && if_vlantrunkinuse(ifp)) { BLOGE(sc, "Cannot detach while VLANs are in use.\n"); return(EBUSY); } bxe_del_cdev(sc); /* stop the periodic callout */ bxe_periodic_stop(sc); /* stop the chip taskqueue */ atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); if (sc->chip_tq) { taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); taskqueue_free(sc->chip_tq); sc->chip_tq = NULL; taskqueue_drain_timeout(taskqueue_thread, &sc->sp_err_timeout_task); } /* stop and reset the controller if it was open */ if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); sc->state = BXE_STATE_DISABLED; BXE_CORE_UNLOCK(sc); } /* release the network interface */ if (ifp != NULL) { ether_ifdetach(ifp); } ifmedia_removeall(&sc->ifmedia); /* XXX do the following based on driver state... 
*/ /* free the host hardware/software hsi structures */ bxe_free_hsi_mem(sc); /* free ilt */ bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); /* release the interrupts */ bxe_interrupt_free(sc); /* Release the mutexes*/ bxe_destroy_fp_mutexs(sc); bxe_release_mutexes(sc); /* Release the PCIe BAR mapped memory */ bxe_deallocate_bars(sc); /* Release the FreeBSD interface. */ if (sc->ifp != NULL) { if_free(sc->ifp); } pci_disable_busmaster(dev); return (0); } /* * Device shutdown function. * * Stops and resets the controller. * * Returns: * Nothing */ static int bxe_shutdown(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); /* stop the periodic callout */ bxe_periodic_stop(sc); if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); BXE_CORE_UNLOCK(sc); } return (0); } void bxe_igu_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id, uint8_t segment, uint16_t index, uint8_t op, uint8_t update) { uint32_t igu_addr = sc->igu_base_addr; igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); } static void bxe_igu_clear_sb_gen(struct bxe_softc *sc, uint8_t func, uint8_t idu_sb_id, uint8_t is_pf) { uint32_t data, ctl, cnt = 100; uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; uint32_t sb_bit = 1 << (idu_sb_id%32); uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; /* Not supported in BC mode */ if (CHIP_INT_MODE_IS_BC(sc)) { return; } data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | (func_encode << IGU_CTRL_REG_FID_SHIFT) | (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(sc, igu_addr_data, data); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(sc, igu_addr_ctl, ctl); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); /* wait for clean up to finish */ while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { DELAY(20000); } if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { BLOGD(sc, DBG_LOAD, "Unable to finish IGU cleanup: " "idu_sb_id %d offset %d bit %d (cnt %d)\n", idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); } } static void bxe_igu_clear_sb(struct bxe_softc *sc, uint8_t idu_sb_id) { bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); } /*******************/ /* ECORE CALLBACKS */ /*******************/ static void bxe_reset_common(struct bxe_softc *sc) { uint32_t val = 0x1400; /* reset_common */ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); } static void bxe_common_init_phy(struct bxe_softc *sc) { uint32_t shmem_base[2]; uint32_t shmem2_base[2]; /* Avoid common init in case MFW supports LFA */ if (SHMEM2_RD(sc, size) > (uint32_t)offsetof(struct shmem2_region, lfa_host_addr[SC_PORT(sc)])) { return; } 
shmem_base[0] = sc->devinfo.shmem_base; shmem2_base[0] = sc->devinfo.shmem2_base; if (!CHIP_IS_E1x(sc)) { shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); } bxe_acquire_phy_lock(sc); elink_common_init_phy(sc, shmem_base, shmem2_base, sc->devinfo.chip_id, 0); bxe_release_phy_lock(sc); } static void bxe_pf_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~IGU_PF_CONF_FUNC_EN; REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); } static void bxe_init_pxp(struct bxe_softc *sc) { uint16_t devctl; int r_order, w_order; devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2); BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5); if (sc->mrrs == -1) { r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12); } else { BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); r_order = sc->mrrs; } ecore_init_pxp_arb(sc, r_order, w_order); } static uint32_t bxe_get_pretend_reg(struct bxe_softc *sc) { uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); return (base + (SC_ABS_FUNC(sc)) * stride); } /* * Called only on E1H or E2. * When pretending to be PF, the pretend value is the function number 0..7. * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID * combination. 
*/ static int bxe_pretend_func(struct bxe_softc *sc, uint16_t pretend_func_val) { uint32_t pretend_reg; if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { return (-1); } /* get my own pretend register */ pretend_reg = bxe_get_pretend_reg(sc); REG_WR(sc, pretend_reg, pretend_func_val); REG_RD(sc, pretend_reg); return (0); } static void bxe_iov_init_dmae(struct bxe_softc *sc) { return; } static void bxe_iov_init_dq(struct bxe_softc *sc) { return; } /* send a NIG loopback debug packet */ static void bxe_lb_pckt(struct bxe_softc *sc) { uint32_t wb_write[3]; /* Ethernet source and destination addresses */ wb_write[0] = 0x55555555; wb_write[1] = 0x55555555; wb_write[2] = 0x20; /* SOP */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); /* NON-IP protocol */ wb_write[0] = 0x09000000; wb_write[1] = 0x55555555; wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); } /* * Some of the internal memories are not directly readable from the driver. * To test them we send debug packets. */ static int bxe_int_mem_test(struct bxe_softc *sc) { int factor; int count, i; uint32_t val = 0; if (CHIP_REV_IS_FPGA(sc)) { factor = 120; } else if (CHIP_REV_IS_EMUL(sc)) { factor = 200; } else { factor = 1; } /* disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send Ethernet packet */ bxe_lb_pckt(sc); /* TODO do i reset NIG statistic? 
*/ /* Wait until NIG register shows 1 packet of size 0x10 */ count = 1000 * factor; while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0x10) { break; } DELAY(10000); count--; } if (val != 0x10) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-1); } /* wait until PRS register shows 1 packet */ count = (1000 * factor); while (count) { val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val == 1) { break; } DELAY(10000); count--; } if (val != 0x1) { BLOGE(sc, "PRS timeout val=0x%x\n", val); return (-2); } /* Reset and init BRB, PRS */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); /* Disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send 10 Ethernet packets */ for (i = 0; i < 10; i++) { bxe_lb_pckt(sc); } /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ count = (1000 * factor); while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0xb0) { break; } DELAY(10000); count--; } if (val != 0xb0) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-3); } /* Wait until PRS register shows 2 packets */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 2) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* Write 1 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); /* Wait until PRS register shows 3 packets */ DELAY(10000 * factor); /* Wait until NIG register shows 1 packet of size 0x10 */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 3) { BLOGE(sc, "PRS 
timeout val=0x%x\n", val); } /* clear NIG EOP FIFO */ for (i = 0; i < 11; i++) { REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); } val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); if (val != 1) { BLOGE(sc, "clear of NIG failed val=0x%x\n", val); return (-4); } /* Reset and init BRB, PRS, NIG */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); if (!CNIC_SUPPORT(sc)) { /* set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); } /* Enable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); REG_WR(sc, CFC_REG_DEBUG0, 0x0); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); return (0); } static void bxe_setup_fan_failure_detection(struct bxe_softc *sc) { int is_required; uint32_t val; int port; is_required = 0; val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & SHARED_HW_CFG_FAN_FAILURE_MASK); if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { is_required = 1; } /* * The fan failure mechanism is usually related to the PHY type since * the power consumption of the board is affected by the PHY. Currently, * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 
*/ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { for (port = PORT_0; port < PORT_MAX; port++) { is_required |= elink_fan_failure_det_req(sc, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, port); } } BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); if (is_required == 0) { return; } /* Fan failure is indicated by SPIO 5 */ bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); /* set to active low mode */ val = REG_RD(sc, MISC_REG_SPIO_INT); val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); REG_WR(sc, MISC_REG_SPIO_INT, val); /* enable interrupt to signal the IGU */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); val |= MISC_SPIO_SPIO5; REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); } static void bxe_enable_blocks_attention(struct bxe_softc *sc) { uint32_t val; REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); } else { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); } REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* * mask read length error interrupts in brb for parser * (parsing unit and 'checksum and crc' unit) * these errors are legal (PU reads fixed length and CAC can cause * read length error on truncated packets) */ REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); REG_WR(sc, QM_REG_QM_INT_MASK, 0); REG_WR(sc, TM_REG_TM_INT_MASK, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ /* 
REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); if (!CHIP_IS_E1x(sc)) { val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); } REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ if (!CHIP_IS_E1x(sc)) { /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); } REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ } /** * bxe_init_hw_common - initialize the HW at the COMMON phase. * * @sc: driver handle */ static int bxe_init_hw_common(struct bxe_softc *sc) { uint8_t abs_func_id; uint32_t val; BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", SC_ABS_FUNC(sc)); /* * take the RESET lock to protect undi_unload flow from accessing * registers while we are resetting the chip */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); bxe_reset_common(sc); REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); val = 0xfffc; if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); BLOGD(sc, DBG_LOAD, "after misc block init\n"); if (!CHIP_IS_E1x(sc)) { /* * 4-port mode or 2-port mode we need to turn off master-enable for * everyone. After that we turn it back on for self. 
So, we disregard * multi-function, and always disable all functions on the given path, * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 */ for (abs_func_id = SC_PATH(sc); abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { if (abs_func_id == SC_ABS_FUNC(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); continue; } bxe_pretend_func(sc, abs_func_id); /* clear pf enable */ bxe_pf_disable(sc); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); } } BLOGD(sc, DBG_LOAD, "after pf disable\n"); ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); if (CHIP_IS_E1(sc)) { /* * enable HW interrupt from PXP on USDM overflow * bit 16 on INT_MASK_0 */ REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); } ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); bxe_init_pxp(sc); #ifdef __BIG_ENDIAN REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); /* make sure this value is 0 */ REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); #endif ecore_ilt_init_page_size(sc, INITOP_SET); if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); } /* let the HW do it's magic... */ DELAY(100000); /* finish PXP init */ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); if (val != 1) { BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", val); return (-1); } val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); if (val != 1) { BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); return (-1); } BLOGD(sc, DBG_LOAD, "after pxp init\n"); /* * Timer bug workaround for E2 only. We need to set the entire ILT to have * entries with value "0" and valid bit on. This needs to be done by the * first PF that is loaded in a path (i.e. 
common phase) */ if (!CHIP_IS_E1x(sc)) { /* * In E2 there is a bug in the timers block that can cause function 6 / 7 * (i.e. vnic3) to start even if it is marked as "scan-off". * This occurs when a different function (func2,3) is being marked * as "scan-off". Real-life scenario for example: if a driver is being * load-unloaded while func6,7 are down. This will cause the timer to access * the ilt, translate to a logical address and send a request to read/write. * Since the ilt for the function that is down is not valid, this will cause * a translation error which is unrecoverable. * The Workaround is intended to make sure that when this happens nothing * fatal will occur. The workaround: * 1. First PF driver which loads on a path will: * a. After taking the chip out of reset, by using pretend, * it will write "0" to the following registers of * the other vnics. * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); * And for itself it will write '1' to * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable * dmae-operations (writing to pram for example.) * note: can be done for only function 6,7 but cleaner this * way. * b. Write zero+valid to the entire ILT. * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of * VNIC3 (of that port). The range allocated will be the * entire ILT. This is needed to prevent ILT range error. * 2. Any PF driver load flow: * a. ILT update with the physical addresses of the allocated * logical pages. * b. Wait 20msec. - note that this timeout is needed to make * sure there are no requests in one of the PXP internal * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. (could have * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: * a. Clear the Timers scan_en. * b. Polling for scan_on=0 for that PF. * c. 
Clear the PF enable bit in the PXP. * d. Clear the PF enable in the CFC (WEAK + STRONG) * e. Write zero+valid to all ILT entries (The valid bit must * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry * to the last enrty in the ILT. * * Notes: * Currently the PF error in the PGLC is non recoverable. * In the future the there will be a recovery routine for this error. * Currently attention is masked. * Having an MCP lock on the load/unload process does not guarantee that * there is no Timer disable during Func6/7 enable. This is because the * Timers scan is currently being cleared by the MCP on FLR. * Step 2.d can be done only for PF6/7 and the driver can also check if * there is error before clearing it. But the flow above is simpler and * more general. * All ILT entries are written by zero+valid and not just PF6/7 * ILT entries since in the future the ILT entries allocation for * PF-s might be dynamic. */ struct ilt_client_info ilt_cli; struct ecore_ilt ilt; memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); memset(&ilt, 0, sizeof(struct ecore_ilt)); /* initialize dummy TM client */ ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; /* * Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th * vnic (this code assumes existence of the vnic) * * both steps performed by call to ecore_ilt_client_init_op() * with dummy TM client * * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT * and his brother are split registers */ bxe_pretend_func(sc, (SC_PATH(sc) + 6)); ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } REG_WR(sc, 
PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); if (!CHIP_IS_E1x(sc)) { int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : (CHIP_REV_IS_FPGA(sc) ? 400 : 0); ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); /* let the HW do it's magic... */ do { DELAY(200000); val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); } while (factor-- && (val != 1)); if (val != 1) { BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); return (-1); } } BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); bxe_iov_init_dmae(sc); /* clean the DMAE memory */ sc->dmae_ready = 1; ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); /* QM queues pointers table */ ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); /* soft reset pulse */ REG_WR(sc, QM_REG_SOFT_RESET, 1); REG_WR(sc, QM_REG_SOFT_RESET, 0); if (CNIC_SUPPORT(sc)) ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); if (!CHIP_REV_IS_SLOW(sc)) { /* enable hw interrupt from doorbell Q */ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); } ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); if (!CHIP_IS_E1(sc)) { REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); } if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * received in AFEX mode */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 
REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); } else { /* * Bit-map indicating which L2 hdrs may appear * after the basic Ethernet header */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); } } ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { /* reset VFC memories */ REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); DELAY(20000); } ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); /* sync semi rtc */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * sent in AFEX mode */ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); } else { REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); } } REG_WR(sc, SRC_REG_SOFT_RST, 1); ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); if (CNIC_SUPPORT(sc)) { REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); } REG_WR(sc, SRC_REG_SOFT_RST, 0); if (sizeof(union cdu_context) != 1024) { /* we currently assume that a context is 1024 bytes */ BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", (long)sizeof(union cdu_context)); } ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); /* enable context validation interrupt from CFC */ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* set the thresholds to prevent CFC/CDU race */ REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); } ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); /* Reset PCIE errors for debug */ REG_WR(sc, 0x2814, 0xffffffff); REG_WR(sc, 0x3820, 0xffffffff); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); } 
ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);

    if (!CHIP_IS_E1(sc)) {
        /* in E3 this done in per-port section */
        if (!CHIP_IS_E3(sc))
            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
    }

    if (CHIP_IS_E1H(sc)) {
        /* not applicable for E2 (and above ...) */
        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
    }

    if (CHIP_REV_IS_SLOW(sc)) {
        DELAY(200000);
    }

    /* finish CFC init */
    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
        return (-1);
    }
    REG_WR(sc, CFC_REG_DEBUG0, 0);

    if (CHIP_IS_E1(sc)) {
        /* read NIG statistic to see if this is our first up since powerup */
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);

        /* do internal memory self test */
        if ((val == 0) && bxe_int_mem_test(sc)) {
            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
            return (-1);
        }
    }

    bxe_setup_fan_failure_detection(sc);

    /* clear PXP2 attentions */
    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

    bxe_enable_blocks_attention(sc);

    if (!CHIP_REV_IS_SLOW(sc)) {
        ecore_enable_blocks_parity(sc);
    }

    if (!BXE_NOMCP(sc)) {
        if (CHIP_IS_E1x(sc)) {
            bxe_common_init_phy(sc);
        }
    }

    return (0);
}

/**
 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @sc: driver handle
 */
static int
bxe_init_hw_common_chip(struct bxe_softc *sc)
{
    int rc = bxe_init_hw_common(sc);

    if (rc) {
        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
        return (rc);
    }

    /* In E2 2-PORT mode, same ext phy is used for the two paths */
    if (!BXE_NOMCP(sc)) {
        bxe_common_init_phy(sc);
    }

    return (0);
}

/*
 * Per-port hardware init (PHASE_PORT0/PHASE_PORT1): runs the ecore init
 * tables for every block at port scope and programs the port-specific
 * BRB pause thresholds, PRS/NIG header classification and AEU masks.
 * Returns 0 (the only return path; errors are not detected here).
 */
static int
bxe_init_hw_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
    uint32_t low, high;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    /*
     * Timers bug workaround: disables the pf_master bit in pglue at
     * common phase, we need to enable it here before any dmae access are
     * attempted. Therefore we manually added the enable-master to the
     * port phase (it also happens in the function phase)
     */
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
    }

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    ecore_init_block(sc, BLOCK_QM, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);

    /* QM cid (connection) count */
    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_TM, init_phase);
        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
    }

    ecore_init_block(sc, BLOCK_DORQ, init_phase);
    ecore_init_block(sc, BLOCK_BRB1, init_phase);

    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
        /*
         * BRB pause low threshold, in 256-byte blocks; depends on
         * MF mode, MTU and whether only one port is active.
         */
        if (IS_MF(sc)) {
            low = (BXE_ONE_PORT(sc) ? 160 : 246);
        } else if (sc->mtu > 4096) {
            if (BXE_ONE_PORT(sc)) {
                low = 160;
            } else {
                val = sc->mtu;
                /* (24*1024 + val*4)/256 */
                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
            }
        } else {
            low = (BXE_ONE_PORT(sc) ? 80 : 160);
        }
        high = (low + 56); /* 14*1024/256 */
        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        REG_WR(sc, SC_PORT(sc) ?
               BRB1_REG_MAC_GUARANTIED_1 :
               BRB1_REG_MAC_GUARANTIED_0, 40);
    }

    ecore_init_block(sc, BLOCK_PRS, init_phase);
    if (CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /* configure headers for AFEX mode */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
        } else {
            /* Ovlan exists only if we are in multi-function +
             * switch-dependent mode, in switch-independent there
             * is no ovlan headers
             */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
        }
    }

    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);

    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);

    ecore_init_block(sc, BLOCK_PBF, init_phase);

    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE mtu 9000 */
        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

        /* probe changes */
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
        DELAY(50);
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
    }

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_SRC, init_phase);
    }

    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (CHIP_IS_E1(sc)) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    }

    ecore_init_block(sc, BLOCK_HC, init_phase);
    ecore_init_block(sc, BLOCK_IGU, init_phase);
    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

    /* init aeu_mask_attn_func_0/1:
     * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
     * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
     *   bits 4-7 are used for "per vn group attention"
     */
    val = IS_MF(sc) ? 0xF7 : 0x7;
    /* Enable DCBX attention for all but E1 */
    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

    ecore_init_block(sc, BLOCK_NIG, init_phase);

    if (!CHIP_IS_E1x(sc)) {
        /* Bit-map indicating which L2 hdrs may appear after the
         * basic Ethernet header
         */
        if (IS_MF_AFEX(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
        } else {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC,
                   IS_MF_SD(sc) ? 7 : 6);
        }

        if (CHIP_IS_E3(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_LLH1_MF_MODE :
                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
        }
    }
    if (!CHIP_IS_E3(sc)) {
        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
    }

    if (!CHIP_IS_E1(sc)) {
        /* 0x2 disable mf_ov, 0x1 enable */
        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
               (IS_MF_SD(sc) ? 0x1 : 0x2));

        if (!CHIP_IS_E1x(sc)) {
            val = 0;
            switch (sc->devinfo.mf_info.mf_mode) {
            case MULTI_FUNCTION_SD:
                val = 1;
                break;
            case MULTI_FUNCTION_SI:
            case MULTI_FUNCTION_AFEX:
                val = 2;
                break;
            }

            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
                        NIG_REG_LLH0_CLS_TYPE), val);
        }

        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
    }

    /* If SPIO5 is set to generate interrupts, enable it for this port */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    if (val & MISC_SPIO_SPIO5) {
        uint32_t reg_addr = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(sc, reg_addr); val |= AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_addr, val); } return (0); } static uint32_t bxe_flr_clnup_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected, uint32_t poll_count) { uint32_t cur_cnt = poll_count; uint32_t val; while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); } return (val); } static int bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, uint32_t reg, char *msg, uint32_t poll_cnt) { uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); if (val != 0) { BLOGE(sc, "%s usage count=%d\n", msg, val); return (1); } return (0); } /* Common routines with VF FLR cleanup */ static uint32_t bxe_flr_clnup_poll_count(struct bxe_softc *sc) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(sc)) { return (FLR_POLL_CNT * 2000); } if (CHIP_REV_IS_FPGA(sc)) { return (FLR_POLL_CNT * 120); } return (FLR_POLL_CNT); } static int bxe_poll_hw_usage_counters(struct bxe_softc *sc, uint32_t poll_cnt) { /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bxe_flr_clnup_poll_hw_counter(sc, CFC_REG_NUM_LCIDS_INSIDE_PF, "CFC PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, DORQ_REG_PF_USAGE_CNT, "DQ PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), "QM PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), "Timers VNIC usage counter timed out", poll_cnt)) { return (1); } if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), "Timers NUM_SCANS usage counter timed out", poll_cnt)) { return (1); } /* Wait 
DMAE PF usage counter to zero */ if (bxe_flr_clnup_poll_hw_counter(sc, dmae_reg_go_c[INIT_DMAE_C(sc)], "DMAE dommand register timed out", poll_cnt)) { return (1); } return (0); } #define OP_GEN_PARAM(param) \ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) #define OP_GEN_TYPE(type) \ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) static int bxe_send_final_clnup(struct bxe_softc *sc, uint8_t clnup_func, uint32_t poll_cnt) { uint32_t op_gen_command = 0; uint32_t comp_addr = (BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); int ret = 0; if (REG_RD(sc, comp_addr)) { BLOGE(sc, "Cleanup complete was not 0 before sending\n"); return (1); } op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); op_gen_command |= OP_GEN_AGG_VECT(clnup_func); op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { BLOGE(sc, "FW final cleanup did not succeed\n"); BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", (REG_RD(sc, comp_addr))); bxe_panic(sc, ("FLR cleanup failed\n")); return (1); } /* Zero completion for nxt FLR */ REG_WR(sc, comp_addr, 0); return (ret); } static void bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, struct pbf_pN_buf_regs *regs, uint32_t poll_count) { uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; uint32_t cur_cnt = poll_count; crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); crd = crd_start = REG_RD(sc, regs->crd); init_crd = REG_RD(sc, regs->init_crd); BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: 
s:%x\n", regs->pN, crd_freed); while ((crd != init_crd) && ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < (init_crd - crd_start))) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); crd = REG_RD(sc, regs->crd); crd_freed = REG_RD(sc, regs->crd_freed); } else { BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, struct pbf_pN_cmd_regs *regs, uint32_t poll_count) { uint32_t occup, to_free, freed, freed_start; uint32_t cur_cnt = poll_count; occup = to_free = REG_RD(sc, regs->lines_occup); freed = freed_start = REG_RD(sc, regs->lines_freed); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); while (occup && ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); occup = REG_RD(sc, regs->lines_occup); freed = REG_RD(sc, regs->lines_freed); } else { BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? 
PBF_REG_TQ_LINES_FREED_CNT_Q1 :
            PBF_REG_P1_TQ_LINES_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_LB_Q :
            PBF_REG_P4_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
            PBF_REG_P4_TQ_LINES_FREED_CNT}
    };

    struct pbf_pN_buf_regs buf_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q0 :
            PBF_REG_P0_INIT_CRD ,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q0 :
            PBF_REG_P0_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q1 :
            PBF_REG_P1_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q1 :
            PBF_REG_P1_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_LB_Q :
            PBF_REG_P4_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_LB_Q :
            PBF_REG_P4_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
    };

    int i;

    /* Verify the command queues are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
    }

    /* Verify the transmission buffers are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
    }
}

/*
 * Debug aid: dump the PF enable/status registers (CFC, PBF, IGU MSI/MSI-X,
 * PGLUE shadow/FLR-request/master-enable) to the load debug log. Read-only;
 * used after FLR cleanup to inspect the resulting HW state.
 */
static void
bxe_hw_enable_status(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, PBF_REG_DISABLE_PF);
    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
    BLOGD(sc, DBG_LOAD,
          "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
}

/*
 * Cleanup after a PCIe Function Level Reset of this PF: re-enable target
 * reads, drain all HW usage counters, send the FW final-cleanup ramrod,
 * verify tx paths are flushed and no PCI transactions are pending, then
 * re-enable master. Returns 0 on success, -1 on any timeout.
 */
static int
bxe_pf_flr_clnup(struct bxe_softc *sc)
{
    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);

    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));

    /* Re-enable PF target read access */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

    /* Poll HW usage counters */
    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
        return (-1);
    }

    /* Zero the igu 'trailing edge' and 'leading edge' */

    /* Send the FW cleanup command */
    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
        return (-1);
    }

    /* ATC cleanup */

    /* Verify TX hw is flushed */
    bxe_tx_hw_flushed(sc, poll_cnt);

    /* Wait 100ms (not adjusted according to platform) */
    DELAY(100000);

    /* Verify no pending pci transactions */
    if (bxe_is_pcie_pending(sc)) {
        BLOGE(sc, "PCIE Transactions still pending\n");
    }

    /* Debug */
    bxe_hw_enable_status(sc);

    /*
     * Master enable - Due to WB DMAE writes performed before this
     * register is re-initialized as part of the regular function init
     */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

    return (0);
}

/*
 * Per-function hardware init (PHASE_PF0 + func): FLR cleanup (E2+),
 * CDU ILT programming for the L2 contexts, IGU/HC interrupt setup and
 * per-function block init. Returns 0 on success or a negative error.
 */
static int
bxe_init_hw_func(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int init_phase = PHASE_PF0 + func;
    struct ecore_ilt *ilt = sc->ilt;
    uint16_t cdu_ilt_start;
    uint32_t addr, val;
    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
    int i, main_mem_width, rc;

    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);

    /* FLR cleanup */
    if (!CHIP_IS_E1x(sc)) {
        rc = bxe_pf_flr_clnup(sc);
        if (rc) {
            BLOGE(sc, "FLR cleanup failed!\n");
            // XXX bxe_fw_dump(sc);
            // XXX bxe_idle_chk(sc);
            return (rc);
        }
    }

    /* set MSI reconfigure capability */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        addr = (port ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
        val = REG_RD(sc, addr);
        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
        REG_WR(sc, addr, val);
    }

    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    ilt = sc->ilt;
    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

    /* Point the CDU ILT lines at this function's L2 context pages. */
    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
        ilt->lines[cdu_ilt_start + i].page_mapping =
            sc->context[i].vcxt_dma.paddr;
        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
    }
    ecore_ilt_init_op(sc, INITOP_SET);

    /* Set NIC mode */
    REG_WR(sc, PRS_REG_NIC_MODE, 1);
    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");

    if (!CHIP_IS_E1x(sc)) {
        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;

        /* Turn on a single ISR mode in IGU if driver is going to use
         * INT#x or MSI
         */
        if (sc->interrupt_mode != INTR_MODE_MSIX) {
            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
        }

        /*
         * Timers workaround bug: function init part.
         * Need to wait 20msec after initializing ILT,
         * needed to make sure there are no requests in
         * one of the PXP internal queues with "old" ILT addresses
         */
        DELAY(20000);

        /*
         * Master enable - Due to WB DMAE writes performed before this
         * register is re-initialized as part of the regular function
         * init
         */
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

        /* Enable the function in IGU */
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
    }

    sc->dmae_ready = 1;

    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_NIG, init_phase);
    ecore_init_block(sc, BLOCK_SRC, init_phase);
    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);
    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, QM_REG_PF_EN, 1);

    if (!CHIP_IS_E1x(sc)) {
        /* VF error-reporting IDs start after the max number of VFs */
        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
    }
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TM, init_phase);
    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    bxe_iov_init_dq(sc);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);
    ecore_init_block(sc, BLOCK_PRS, init_phase);
    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);
    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);
    ecore_init_block(sc, BLOCK_PBF, init_phase);
    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, PBF_REG_DISABLE_PF, 0);

    ecore_init_block(sc, BLOCK_CDU, init_phase);

    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (!CHIP_IS_E1x(sc))
        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);

    if (IS_MF(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
    }

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);

    /* HC init per function */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        if (CHIP_IS_E1H(sc)) {
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        ecore_init_block(sc, BLOCK_HC, init_phase);
    } else {
        /* IGU interrupt block instead of legacy HC */
        int num_segs, sb_idx, prod_offset;

        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

        if (!CHIP_IS_E1x(sc)) {
            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
        }

        ecore_init_block(sc, BLOCK_IGU, init_phase);

        if (!CHIP_IS_E1x(sc)) {
            int dsb_idx = 0;
            /**
             * Producer memory:
             * E2 mode: address 0-135 match to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod;    141 - PF1 attn prod;
             * 142 - PF2 attn prod;    143 - PF3 attn prod;
             * 144-147 reserved.
             *
             * E1.5 mode - In backward compatible mode;
             * for non default SB; each even line in the memory
             * holds the U producer and each odd line hold
             * the C producer. The first 128 producers are for
             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB for each PF.
             * Each PF has five segments: (the order inside each
             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods;
             */
            /* non-default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
                prod_offset = (sc->igu_base_sb + sb_idx) * num_segs;

                for (i = 0; i < num_segs; i++) {
                    addr = IGU_REG_PROD_CONS_MEMORY +
                           (prod_offset + i) * 4;
                    REG_WR(sc, addr, 0);
                }
                /* send consumer update with value 0 */
                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
            }

            /* default-status-blocks */
            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

            if (CHIP_IS_MODE_4_PORT(sc))
                dsb_idx = SC_FUNC(sc);
            else
                dsb_idx = SC_VN(sc);

            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
                           IGU_BC_BASE_DSB_PROD + dsb_idx :
                           IGU_NORM_BASE_DSB_PROD + dsb_idx);

            /*
             * igu prods come in chunks of E1HVN_MAX (4) -
             * does not matters what is the current chip mode
             */
            for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) {
                addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i)*4;
                REG_WR(sc, addr, 0);
            }
            /* send consumer update with 0 */
            if (CHIP_INT_MODE_IS_BC(sc)) {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           CSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           XSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           TSTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            } else {
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           USTORM_ID, 0, IGU_INT_NOP, 1);
                bxe_ack_sb(sc, sc->igu_dsb_id,
                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
            }
            bxe_igu_clear_sb(sc, sc->igu_dsb_id);

            /* !!! these should become driver const once
             *     rf-tool supports split-68 const
             */
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
        }
    }

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2114, 0xffffffff);
    REG_WR(sc, 0x2120, 0xffffffff);

    if (CHIP_IS_E1x(sc)) {
        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
        main_mem_base = HC_REG_MAIN_MEMORY +
                        SC_PORT(sc) * (main_mem_size * 4);
        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
        main_mem_width = 8;

        val = REG_RD(sc, main_mem_prty_clr);
        if (val) {
            BLOGD(sc, DBG_LOAD,
                  "Parity errors in HC block during function init (0x%x)!\n",
                  val);
        }

        /* Clear "false" parity errors in MSI-X table */
        for (i = main_mem_base;
             i < main_mem_base + main_mem_size * 4;
             i += main_mem_width) {
            bxe_read_dmae(sc, i, main_mem_width / 4);
            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
                           i, main_mem_width / 4);
        }
        /* Clear HC parity attention */
        REG_RD(sc, main_mem_prty_clr);
    }

#if 1
    /* Enable STORMs SP logging */
REG_WR8(sc, BAR_USTRORM_INTMEM +
           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_TSTRORM_INTMEM +
           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
    REG_WR8(sc, BAR_XSTRORM_INTMEM +
           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
#endif

    elink_phy_probe(&sc->link_params);

    return (0);
}

/*
 * Reset the physical link via elink (requires the MCP-arbitrated PHY
 * lock); without bootcode the reset cannot be performed and is only
 * warned about on real silicon.
 */
static void
bxe_link_reset(struct bxe_softc *sc)
{
    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
    } else {
        if (!CHIP_REV_IS_SLOW(sc)) {
            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
        }
    }
}

/*
 * Quiesce this port: reset the link, mask its interrupts, stop packet
 * reception into the BRB and check that the BRB has drained.
 */
static void
bxe_reset_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t val;

    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");

    /* reset physical Link */
    bxe_link_reset(sc);

    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    /* Do not rcv packets to BRB */
    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
    /* Do not direct rcv packets that are not for MCP to the BRB */
    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

    /* Configure AEU */
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

    DELAY(100000);

    /* Check for BRB port occupancy */
    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
    if (val) {
        BLOGD(sc, DBG_LOAD,
              "BRB1 is not empty, %d blocks are occupied\n", val);
    }

    /* TODO: Close Doorbell port? */
}

/*
 * Write one 64-bit ILT entry (split into the two ONCHIP_ADDR halves)
 * via a wide-bus DMAE write. E1 and later chips use different ILT
 * register files.
 */
static void
bxe_ilt_wr(struct bxe_softc *sc,
           uint32_t         index,
           bus_addr_t       addr)
{
    int reg;
    uint32_t wb_write[2];

    if (CHIP_IS_E1(sc)) {
        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
    } else {
        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
    }

    wb_write[0] = ONCHIP_ADDR1(addr);
    wb_write[1] = ONCHIP_ADDR2(addr);
    REG_WR_DMAE(sc, reg, wb_write, 2);
}

/* Zero (invalidate) every ILT entry belonging to function 'func'. */
static void
bxe_clear_func_ilt(struct bxe_softc *sc,
                   uint32_t         func)
{
    uint32_t i, base = FUNC_ILT_BASE(func);

    for (i = base; i < base + ILT_PER_FUNC; i++) {
        bxe_ilt_wr(sc, i, 0);
    }
}

/*
 * Tear down this PF: disable it in the FW storms, disable its status
 * blocks and interrupt edges, stop the timers scan, clear its ILT and
 * (for vnic-3 on E2+) re-apply the timers ILT workaround. Assumes
 * bxe_reset_port() has already been called.
 */
static void
bxe_reset_func(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int i;

    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

    /* FP SBs */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
                SB_DISABLED);
    }

    /* SP SB */
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
            SB_DISABLED);

    /*
     * NOTE(review): every iteration writes the same dword (the offset
     * lacks "+ i*4"); this matches the upstream Linux bnx2x driver,
     * but looks like it only zeroes the first SPQ data dword - verify.
     */
    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    }

    /* Configure IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    } else {
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    }

    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
        /*
         * Wait for at least 10ms and up to 2 second for the timers
         * scan to complete
         */
        for (i = 0; i < 200; i++) {
            DELAY(10000);
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
                break;
        }
    }

    /* Clear ILT */
    bxe_clear_func_ilt(sc, func);

    /*
     * Timers workaround bug for E2: if this is vnic-3,
     * we need to set the entire ilt range for this timers.
     */
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    }

    /* this assumes that reset_port() called before reset_func()*/
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    }

    sc->dmae_ready = 0;
}

/* Gunzip work-area setup - nothing needed in the FreeBSD port. */
static int
bxe_gunzip_init(struct bxe_softc *sc)
{
    return (0);
}

/* Gunzip work-area teardown - nothing needed in the FreeBSD port. */
static void
bxe_gunzip_end(struct bxe_softc *sc)
{
    return;
}

/*
 * Select the compiled-in ecore init tables and IRO array matching the
 * chip generation (E1 / E1H / E2+). Returns -1 for unknown silicon.
 */
static int
bxe_init_firmware(struct bxe_softc *sc)
{
    if (CHIP_IS_E1(sc)) {
        ecore_init_e1_firmware(sc);
        sc->iro_array = e1_iro_arr;
    } else if (CHIP_IS_E1H(sc)) {
        ecore_init_e1h_firmware(sc);
        sc->iro_array = e1h_iro_arr;
    } else if (!CHIP_IS_E1x(sc)) {
        ecore_init_e2_firmware(sc);
        sc->iro_array = e2_iro_arr;
    } else {
        BLOGE(sc, "Unsupported chip revision\n");
        return (-1);
    }

    return (0);
}

/* Firmware is statically linked; nothing to release. */
static void
bxe_release_firmware(struct bxe_softc *sc)
{
    /* Do nothing */
    return;
}

static int
ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len)
{
    /* XXX : Implement... 
*/ BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); return (FALSE); } static void ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { bxe_reg_wr_ind(sc, addr, val); } static void ecore_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { bxe_write_dmae_phys_len(sc, phys_addr, addr, len); } void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, size_t size, uint32_t *data) { uint8_t i; for (i = 0; i < size/4; i++) { REG_WR(sc, addr + (i * 4), data[i]); } } /* * character device - ioctl interface definitions */ #include "bxe_dump.h" #include "bxe_ioctl.h" #include static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); static struct cdevsw bxe_cdevsw = { .d_version = D_VERSION, .d_ioctl = bxe_eioctl, .d_name = "bxecnic", }; #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1)) #define DUMP_ALL_PRESETS 0x1FFF #define DUMP_MAX_PRESETS 13 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) #define IS_REG_IN_PRESET(presets, idx) \ ((presets & (1 << (idx-1))) == (1 << (idx-1))) static int bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) { if (CHIP_IS_E1(sc)) return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(sc)) return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(sc)) return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(sc)) return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(sc)) return dump_num_registers[4][preset-1]; else return 0; } static int bxe_get_total_regs_len32(struct bxe_softc *sc) { uint32_t preset_idx; int regdump_len32 = 0; /* Calculate the total preset regs length */ for (preset_idx = 1; 
preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); } return regdump_len32; } static const uint32_t * __bxe_get_page_addr_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_vals_e2; else if (CHIP_IS_E3(sc)) return page_vals_e3; else return NULL; } static uint32_t __bxe_get_page_reg_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_MODE_VALUES_E2; else if (CHIP_IS_E3(sc)) return PAGE_MODE_VALUES_E3; else return 0; } static const uint32_t * __bxe_get_page_write_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_write_regs_e2; else if (CHIP_IS_E3(sc)) return page_write_regs_e3; else return NULL; } static uint32_t __bxe_get_page_write_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_WRITE_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_WRITE_REGS_E3; else return 0; } static const struct reg_addr * __bxe_get_page_read_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_read_regs_e2; else if (CHIP_IS_E3(sc)) return page_read_regs_e3; else return NULL; } static uint32_t __bxe_get_page_read_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_READ_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_READ_REGS_E3; else return 0; } static bool bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(reg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(reg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(reg_info->chips); else if (CHIP_IS_E3A0(sc)) return IS_E3A0_REG(reg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(reg_info->chips); else return 0; } static bool bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(wreg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(wreg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(wreg_info->chips); else if (CHIP_IS_E3A0(sc)) return IS_E3A0_REG(wreg_info->chips); else if 
(CHIP_IS_E3B0(sc)) return IS_E3B0_REG(wreg_info->chips); else return 0; } /** * bxe_read_pages_regs - read "paged" registers * * @bp device handle * @p output buffer * * Reads "paged" memories: memories that may only be read by first writing to a * specific address ("write address") and then reading from a specific address * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. */ static void bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, k, n; /* addresses of the paged registers */ const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); /* number of paged registers */ int num_pages = __bxe_get_page_reg_num(sc); /* write addresses */ const uint32_t *write_addr = __bxe_get_page_write_ar(sc); /* number of write addresses */ int write_num = __bxe_get_page_write_num(sc); /* read addresses info */ const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); /* number of read addresses */ int read_num = __bxe_get_page_read_num(sc); uint32_t addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(sc, write_addr[j], page_addr[i]); for (k = 0; k < read_num; k++) { if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { size = read_addr[k].size; for (n = 0; n < size; n++) { addr = read_addr[k].addr + n*4; *p++ = REG_RD(sc, addr); } } } } } return; } static int bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, addr; const struct wreg_addr *wreg_addr_p = NULL; if (CHIP_IS_E1(sc)) wreg_addr_p = &wreg_addr_e1; else if (CHIP_IS_E1H(sc)) wreg_addr_p = &wreg_addr_e1h; else if (CHIP_IS_E2(sc)) wreg_addr_p = &wreg_addr_e2; else if (CHIP_IS_E3A0(sc)) wreg_addr_p = &wreg_addr_e3; else if (CHIP_IS_E3B0(sc)) wreg_addr_p = &wreg_addr_e3b0; else return (-1); /* Read the idle_chk registers */ for (i = 0; i < IDLE_REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && 
IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { for (j = 0; j < idle_reg_addrs[i].size; j++) *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); } } /* Read the regular registers */ for (i = 0; i < REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, ®_addrs[i]) && IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); } } /* Read the CAM registers */ if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { for (i = 0; i < wreg_addr_p->size; i++) { *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); /* In case of wreg_addr register, read additional registers from read_regs array */ for (j = 0; j < wreg_addr_p->read_regs_count; j++) { addr = *(wreg_addr_p->read_regs); *p++ = REG_RD(sc, addr + j*4); } } } /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { /* Read "paged" registers */ bxe_read_pages_regs(sc, p, preset); } return 0; } int bxe_grc_dump(struct bxe_softc *sc) { int rval = 0; uint32_t preset_idx; uint8_t *buf; uint32_t size; struct dump_header *d_hdr; uint32_t i; uint32_t reg_val; uint32_t reg_addr; uint32_t cmd_offset; struct ecore_ilt *ilt = SC_ILT(sc); struct bxe_fastpath *fp; struct ilt_client_info *ilt_cli; int grc_dump_size; if (sc->grcdump_done || sc->grcdump_started) return (rval); sc->grcdump_started = 1; BLOGI(sc, "Started collecting grcdump\n"); grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); if (sc->grc_dump == NULL) { BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); return(ENOMEM); } /* Disable parity attentions as long as following dump may * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. 
*/ /* Disable parity on path 0 */ bxe_pretend_func(sc, 0); ecore_disable_blocks_parity(sc); /* Disable parity on path 1 */ bxe_pretend_func(sc, 1); ecore_disable_blocks_parity(sc); /* Return to current function */ bxe_pretend_func(sc, SC_ABS_FUNC(sc)); buf = sc->grc_dump; d_hdr = sc->grc_dump; d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; d_hdr->version = BNX2X_DUMP_VERSION; d_hdr->preset = DUMP_ALL_PRESETS; if (CHIP_IS_E1(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1; } else if (CHIP_IS_E1H(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1H; } else if (CHIP_IS_E2(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E2 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3A0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3B0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } buf += sizeof(struct dump_header); for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { /* Skip presets with IOR */ if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || (preset_idx == 11)) continue; rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); if (rval) break; size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); buf += size; } bxe_pretend_func(sc, 0); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); bxe_pretend_func(sc, 1); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); /* Return to current function */ bxe_pretend_func(sc, SC_ABS_FUNC(sc)); if(sc->state == BXE_STATE_OPEN) { if(sc->fw_stats_req != NULL) { BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->fw_stats_req_mapping, (uintmax_t)sc->fw_stats_data_mapping, sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); } if(sc->def_sb != NULL) { BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n", (void *)sc->def_sb_dma.paddr, sc->def_sb, sizeof(struct host_sp_status_block)); } 
if(sc->eq_dma.vaddr != NULL) { BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); } if(sc->sp_dma.vaddr != NULL) { BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n", (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, sizeof(struct bxe_slowpath)); } if(sc->spq_dma.vaddr != NULL) { BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); } if(sc->gz_buf_dma.vaddr != NULL) { BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, FW_BUF_SIZE); } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL && fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL && fp->rx_sge_dma.vaddr != NULL) { BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i, (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, sizeof(union bxe_host_hc_status_block)); BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i, (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); } } if(ilt != NULL ) { ilt_cli = &ilt->clients[1]; if(ilt->lines != NULL) { for (i = ilt_cli->start; i <= ilt_cli->end; i++) { BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); } } } cmd_offset = DMAE_REG_CMD_MEM; for (i = 0; i < 224; i++) { reg_addr 
= (cmd_offset +(i * 4)); reg_val = REG_RD(sc, reg_addr); BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, reg_addr, reg_val); } } BLOGI(sc, "Collection of grcdump done\n"); sc->grcdump_done = 1; return(rval); } static int bxe_add_cdev(struct bxe_softc *sc) { sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); if (sc->eeprom == NULL) { BLOGW(sc, "Unable to alloc for eeprom size buffer\n"); return (-1); } sc->ioctl_dev = make_dev(&bxe_cdevsw, sc->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(sc->ifp)); if (sc->ioctl_dev == NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; return (-1); } sc->ioctl_dev->si_drv1 = sc; return (0); } static void bxe_del_cdev(struct bxe_softc *sc) { if (sc->ioctl_dev != NULL) destroy_dev(sc->ioctl_dev); if (sc->eeprom != NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; } sc->ioctl_dev = NULL; return; } static bool bxe_is_nvram_accessible(struct bxe_softc *sc) { if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return FALSE; return TRUE; } static int bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) { int rval = 0; switch (eeprom->eeprom_cmd) { case BXE_EEPROM_CMD_SET_EEPROM: rval = copyin(eeprom->eeprom_data, sc->eeprom, eeprom->eeprom_data_len); if (rval) break; rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); break; case BXE_EEPROM_CMD_GET_EEPROM: rval = 
bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); if (rval) { break; } rval = copyout(sc->eeprom, eeprom->eeprom_data, eeprom->eeprom_data_len); break; default: rval = EINVAL; break; } if (rval) { BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); } return (rval); } static int bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p) { uint32_t ext_phy_config; int port = SC_PORT(sc); int cfg_idx = bxe_get_link_cfg_idx(sc); dev_p->supported = sc->port.supported[cfg_idx] | (sc->port.supported[cfg_idx ^ 1] & (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE)); dev_p->advertising = sc->port.advertising[cfg_idx]; if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == ELINK_ETH_PHY_SFP_1G_FIBER) { dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full); dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); } if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && !(sc->flags & BXE_MF_FUNC_DIS)) { dev_p->duplex = sc->link_vars.duplex; if (IS_MF(sc) && !BXE_NOMCP(sc)) dev_p->speed = bxe_get_mf_speed(sc); else dev_p->speed = sc->link_vars.line_speed; } else { dev_p->duplex = DUPLEX_UNKNOWN; dev_p->speed = SPEED_UNKNOWN; } dev_p->port = bxe_media_detect(sc); ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) dev_p->phy_address = sc->port.phy_addr; else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); else dev_p->phy_address = 0; if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) dev_p->autoneg = AUTONEG_ENABLE; else dev_p->autoneg = AUTONEG_DISABLE; return 0; } static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { 
struct bxe_softc *sc; int rval = 0; device_t pci_dev; bxe_grcdump_t *dump = NULL; int grc_dump_size; bxe_drvinfo_t *drv_infop = NULL; bxe_dev_setting_t *dev_p; bxe_dev_setting_t dev_set; bxe_get_regs_t *reg_p; bxe_reg_rdw_t *reg_rdw_p; bxe_pcicfg_rdw_t *cfg_rdw_p; bxe_perm_mac_addr_t *mac_addr_p; if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) return ENXIO; pci_dev= sc->dev; dump = (bxe_grcdump_t *)data; switch(cmd) { case BXE_GRC_DUMP_SIZE: dump->pci_func = sc->pcie_func; dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); break; case BXE_GRC_DUMP: grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || (dump->grcdump_size < grc_dump_size)) { rval = EINVAL; break; } if((sc->trigger_grcdump) && (!sc->grcdump_done) && (!sc->grcdump_started)) { rval = bxe_grc_dump(sc); } if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && (sc->grc_dump != NULL)) { dump->grcdump_dwords = grc_dump_size >> 2; rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; sc->grcdump_started = 0; sc->grcdump_done = 0; } break; case BXE_DRV_INFO: drv_infop = (bxe_drvinfo_t *)data; snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", BXE_DRIVER_VERSION); snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", sc->devinfo.bc_ver_str); snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH, "%s", sc->fw_ver_str); drv_infop->eeprom_dump_len = sc->devinfo.flash_size; drv_infop->reg_dump_len = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", sc->pcie_bus, sc->pcie_device, sc->pcie_func); break; case BXE_DEV_SETTING: dev_p = (bxe_dev_setting_t *)data; bxe_get_settings(sc, &dev_set); dev_p->supported = 
dev_set.supported; dev_p->advertising = dev_set.advertising; dev_p->speed = dev_set.speed; dev_p->duplex = dev_set.duplex; dev_p->port = dev_set.port; dev_p->phy_address = dev_set.phy_address; dev_p->autoneg = dev_set.autoneg; break; case BXE_GET_REGS: reg_p = (bxe_get_regs_t *)data; grc_dump_size = reg_p->reg_buf_len; if((!sc->grcdump_done) && (!sc->grcdump_started)) { bxe_grc_dump(sc); } if((sc->grcdump_done) && (sc->grcdump_started) && (sc->grc_dump != NULL)) { rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; sc->grcdump_started = 0; sc->grcdump_done = 0; } break; case BXE_RDW_REG: reg_rdw_p = (bxe_reg_rdw_t *)data; if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); break; case BXE_RDW_PCICFG: cfg_rdw_p = (bxe_pcicfg_rdw_t *)data; if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_width); } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, cfg_rdw_p->cfg_width); } else { BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n"); } break; case BXE_MAC_ADDR: mac_addr_p = (bxe_perm_mac_addr_t *)data; snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", sc->mac_addr_str); break; case BXE_EEPROM: rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data); break; default: break; } return (rval); } #ifdef DEBUGNET static void bxe_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct bxe_softc *sc; sc = if_getsoftc(ifp); BXE_CORE_LOCK(sc); *nrxr = sc->num_queues; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = sc->fp[0].mbuf_alloc_size; BXE_CORE_UNLOCK(sc); } static void bxe_debugnet_event(struct ifnet 
*ifp __unused, enum debugnet_ev event __unused) { } static int bxe_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) { struct bxe_softc *sc; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || !sc->link_vars.link_up) return (ENOENT); error = bxe_tx_encap(&sc->fp[0], &m); if (error != 0 && m != NULL) m_freem(m); return (error); } static int bxe_debugnet_poll(struct ifnet *ifp, int count) { struct bxe_softc *sc; int i; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || !sc->link_vars.link_up) return (ENOENT); for (i = 0; i < sc->num_queues; i++) (void)bxe_rxeof(sc, &sc->fp[i]); (void)bxe_txeof(sc, &sc->fp[0]); return (0); } #endif /* DEBUGNET */ Index: head/sys/dev/etherswitch/mtkswitch/mtkswitch_mt7620.c =================================================================== --- head/sys/dev/etherswitch/mtkswitch/mtkswitch_mt7620.c (revision 359440) +++ head/sys/dev/etherswitch/mtkswitch/mtkswitch_mt7620.c (revision 359441) @@ -1,564 +1,564 @@ /*- * Copyright (c) 2016 Stanislav Galabov. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mtkswitch_phy_read_locked(struct mtkswitch_softc *sc, int phy, int reg) { uint32_t data; MTKSWITCH_WRITE(sc, MTKSWITCH_PIAC, PIAC_PHY_ACS_ST | PIAC_MDIO_ST | (reg << PIAC_MDIO_REG_ADDR_OFF) | (phy << PIAC_MDIO_PHY_ADDR_OFF) | PIAC_MDIO_CMD_READ); while ((data = MTKSWITCH_READ(sc, MTKSWITCH_PIAC)) & PIAC_PHY_ACS_ST); return ((int)(data & PIAC_MDIO_RW_DATA_MASK)); } static int mtkswitch_phy_read(device_t dev, int phy, int reg) { struct mtkswitch_softc *sc = device_get_softc(dev); int data; if ((phy < 0 || phy >= 32) || (reg < 0 || reg >= 32)) return (ENXIO); MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); MTKSWITCH_LOCK(sc); data = mtkswitch_phy_read_locked(sc, phy, reg); MTKSWITCH_UNLOCK(sc); return (data); } static int mtkswitch_phy_write_locked(struct mtkswitch_softc *sc, int phy, int reg, int val) { MTKSWITCH_WRITE(sc, MTKSWITCH_PIAC, PIAC_PHY_ACS_ST | PIAC_MDIO_ST | (reg << PIAC_MDIO_REG_ADDR_OFF) | (phy << PIAC_MDIO_PHY_ADDR_OFF) | (val & PIAC_MDIO_RW_DATA_MASK) | PIAC_MDIO_CMD_WRITE); while (MTKSWITCH_READ(sc, MTKSWITCH_PIAC) & PIAC_PHY_ACS_ST); return (0); } static int mtkswitch_phy_write(device_t dev, int phy, int reg, int val) { struct mtkswitch_softc *sc = device_get_softc(dev); int res; if ((phy < 0 || phy 
>= 32) || (reg < 0 || reg >= 32)) return (ENXIO); MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); MTKSWITCH_LOCK(sc); res = mtkswitch_phy_write_locked(sc, phy, reg, val); MTKSWITCH_UNLOCK(sc); return (res); } static uint32_t mtkswitch_reg_read32(struct mtkswitch_softc *sc, int reg) { return (MTKSWITCH_READ(sc, reg)); } static uint32_t mtkswitch_reg_write32(struct mtkswitch_softc *sc, int reg, uint32_t val) { MTKSWITCH_WRITE(sc, reg, val); return (0); } static uint32_t mtkswitch_reg_read32_mt7621(struct mtkswitch_softc *sc, int reg) { uint32_t low, hi; mtkswitch_phy_write_locked(sc, MTKSWITCH_GLOBAL_PHY, MTKSWITCH_GLOBAL_REG, MTKSWITCH_REG_ADDR(reg)); low = mtkswitch_phy_read_locked(sc, MTKSWITCH_GLOBAL_PHY, MTKSWITCH_REG_LO(reg)); hi = mtkswitch_phy_read_locked(sc, MTKSWITCH_GLOBAL_PHY, - MTKSWITCH_REG_HI(reg));; + MTKSWITCH_REG_HI(reg)); return (low | (hi << 16)); } static uint32_t mtkswitch_reg_write32_mt7621(struct mtkswitch_softc *sc, int reg, uint32_t val) { mtkswitch_phy_write_locked(sc, MTKSWITCH_GLOBAL_PHY, MTKSWITCH_GLOBAL_REG, MTKSWITCH_REG_ADDR(reg)); mtkswitch_phy_write_locked(sc, MTKSWITCH_GLOBAL_PHY, MTKSWITCH_REG_LO(reg), MTKSWITCH_VAL_LO(val)); mtkswitch_phy_write_locked(sc, MTKSWITCH_GLOBAL_PHY, MTKSWITCH_REG_HI(reg), MTKSWITCH_VAL_HI(val)); return (0); } static int mtkswitch_reg_read(device_t dev, int reg) { struct mtkswitch_softc *sc = device_get_softc(dev); uint32_t val; val = sc->hal.mtkswitch_read(sc, MTKSWITCH_REG32(reg)); if (MTKSWITCH_IS_HI16(reg)) return (MTKSWITCH_HI16(val)); return (MTKSWITCH_LO16(val)); } static int mtkswitch_reg_write(device_t dev, int reg, int val) { struct mtkswitch_softc *sc = device_get_softc(dev); uint32_t tmp; tmp = sc->hal.mtkswitch_read(sc, MTKSWITCH_REG32(reg)); if (MTKSWITCH_IS_HI16(reg)) { tmp &= MTKSWITCH_LO16_MSK; tmp |= MTKSWITCH_TO_HI16(val); } else { tmp &= MTKSWITCH_HI16_MSK; tmp |= MTKSWITCH_TO_LO16(val); } sc->hal.mtkswitch_write(sc, MTKSWITCH_REG32(reg), tmp); return (0); } static int mtkswitch_reset(struct 
mtkswitch_softc *sc) { /* We don't reset the switch for now */ return (0); } static int mtkswitch_hw_setup(struct mtkswitch_softc *sc) { /* * TODO: parse the device tree and see if we need to configure * ports, etc. differently. For now we fallback to defaults. */ /* Called early and hence unlocked */ return (0); } static int mtkswitch_hw_global_setup(struct mtkswitch_softc *sc) { /* Currently does nothing */ /* Called early and hence unlocked */ return (0); } static void mtkswitch_port_init(struct mtkswitch_softc *sc, int port) { uint32_t val; /* Called early and hence unlocked */ /* Set the port to secure mode */ val = sc->hal.mtkswitch_read(sc, MTKSWITCH_PCR(port)); val |= PCR_PORT_VLAN_SECURE; sc->hal.mtkswitch_write(sc, MTKSWITCH_PCR(port), val); /* Set port's vlan_attr to user port */ val = sc->hal.mtkswitch_read(sc, MTKSWITCH_PVC(port)); val &= ~PVC_VLAN_ATTR_MASK; sc->hal.mtkswitch_write(sc, MTKSWITCH_PVC(port), val); val = PMCR_CFG_DEFAULT; if (port == sc->cpuport) val |= PMCR_FORCE_LINK | PMCR_FORCE_DPX | PMCR_FORCE_SPD_1000 | PMCR_FORCE_MODE; /* Set port's MAC to default settings */ sc->hal.mtkswitch_write(sc, MTKSWITCH_PMCR(port), val); } static uint32_t mtkswitch_get_port_status(struct mtkswitch_softc *sc, int port) { uint32_t val, res, tmp; MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED); res = 0; val = sc->hal.mtkswitch_read(sc, MTKSWITCH_PMSR(port)); if (val & PMSR_MAC_LINK_STS) res |= MTKSWITCH_LINK_UP; if (val & PMSR_MAC_DPX_STS) res |= MTKSWITCH_DUPLEX; tmp = PMSR_MAC_SPD(val); if (tmp == 0) res |= MTKSWITCH_SPEED_10; else if (tmp == 1) res |= MTKSWITCH_SPEED_100; else if (tmp == 2) res |= MTKSWITCH_SPEED_1000; if (val & PMSR_TX_FC_STS) res |= MTKSWITCH_TXFLOW; if (val & PMSR_RX_FC_STS) res |= MTKSWITCH_RXFLOW; return (res); } static int mtkswitch_atu_flush(struct mtkswitch_softc *sc) { MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED); /* Flush all non-static MAC addresses */ while (sc->hal.mtkswitch_read(sc, MTKSWITCH_ATC) & ATC_BUSY); sc->hal.mtkswitch_write(sc, 
MTKSWITCH_ATC, ATC_BUSY | ATC_AC_MAT_NON_STATIC_MACS | ATC_AC_CMD_CLEAN); while (sc->hal.mtkswitch_read(sc, MTKSWITCH_ATC) & ATC_BUSY); return (0); } static int mtkswitch_port_vlan_setup(struct mtkswitch_softc *sc, etherswitch_port_t *p) { int err; /* * Port behaviour wrt tag/untag/stack is currently defined per-VLAN. * So we say we don't support it here. */ if ((p->es_flags & (ETHERSWITCH_PORT_DOUBLE_TAG | ETHERSWITCH_PORT_ADDTAG | ETHERSWITCH_PORT_STRIPTAG)) != 0) return (ENOTSUP); MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); MTKSWITCH_LOCK(sc); /* Set the PVID */ if (p->es_pvid != 0) { err = sc->hal.mtkswitch_vlan_set_pvid(sc, p->es_port, p->es_pvid); if (err != 0) { MTKSWITCH_UNLOCK(sc); return (err); } } MTKSWITCH_UNLOCK(sc); return (0); } static int mtkswitch_port_vlan_get(struct mtkswitch_softc *sc, etherswitch_port_t *p) { MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); MTKSWITCH_LOCK(sc); /* Retrieve the PVID */ sc->hal.mtkswitch_vlan_get_pvid(sc, p->es_port, &p->es_pvid); /* * Port flags are not supported at the moment. * Port's tag/untag/stack behaviour is defined per-VLAN. 
*/ p->es_flags = 0; MTKSWITCH_UNLOCK(sc); return (0); } static void mtkswitch_invalidate_vlan(struct mtkswitch_softc *sc, uint32_t vid) { while (sc->hal.mtkswitch_read(sc, MTKSWITCH_VTCR) & VTCR_BUSY); sc->hal.mtkswitch_write(sc, MTKSWITCH_VTCR, VTCR_BUSY | VTCR_FUNC_VID_INVALID | (vid & VTCR_VID_MASK)); while (sc->hal.mtkswitch_read(sc, MTKSWITCH_VTCR) & VTCR_BUSY); } static void mtkswitch_vlan_init_hw(struct mtkswitch_softc *sc) { uint32_t val, vid, i; MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); MTKSWITCH_LOCK(sc); /* Reset all VLANs to defaults first */ for (i = 0; i < sc->info.es_nvlangroups; i++) { mtkswitch_invalidate_vlan(sc, i); if (sc->sc_switchtype == MTK_SWITCH_MT7620) { val = sc->hal.mtkswitch_read(sc, MTKSWITCH_VTIM(i)); val &= ~(VTIM_MASK << VTIM_OFF(i)); val |= ((i + 1) << VTIM_OFF(i)); sc->hal.mtkswitch_write(sc, MTKSWITCH_VTIM(i), val); } } /* Now, add all ports as untagged members of VLAN 1 */ if (sc->sc_switchtype == MTK_SWITCH_MT7620) { /* MT7620 uses vid index instead of actual vid */ vid = 0; } else { /* MT7621 uses the vid itself */ vid = 1; } val = VAWD1_IVL_MAC | VAWD1_VTAG_EN | VAWD1_VALID; for (i = 0; i < sc->info.es_nports; i++) val |= VAWD1_PORT_MEMBER(i); sc->hal.mtkswitch_write(sc, MTKSWITCH_VAWD1, val); sc->hal.mtkswitch_write(sc, MTKSWITCH_VAWD2, 0); val = VTCR_BUSY | VTCR_FUNC_VID_WRITE | vid; sc->hal.mtkswitch_write(sc, MTKSWITCH_VTCR, val); /* Set all port PVIDs to 1 */ for (i = 0; i < sc->info.es_nports; i++) { sc->hal.mtkswitch_vlan_set_pvid(sc, i, 1); } MTKSWITCH_UNLOCK(sc); } static int mtkswitch_vlan_getvgroup(struct mtkswitch_softc *sc, etherswitch_vlangroup_t *v) { uint32_t val, i; MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); if ((sc->vlan_mode != ETHERSWITCH_VLAN_DOT1Q) || (v->es_vlangroup > sc->info.es_nvlangroups)) return (EINVAL); /* Reset the member ports. 
*/ v->es_untagged_ports = 0; v->es_member_ports = 0; /* Not supported for now */ v->es_fid = 0; MTKSWITCH_LOCK(sc); if (sc->sc_switchtype == MTK_SWITCH_MT7620) { v->es_vid = (sc->hal.mtkswitch_read(sc, MTKSWITCH_VTIM(v->es_vlangroup)) >> VTIM_OFF(v->es_vlangroup)) & VTIM_MASK; } else { v->es_vid = v->es_vlangroup; } while (sc->hal.mtkswitch_read(sc, MTKSWITCH_VTCR) & VTCR_BUSY); sc->hal.mtkswitch_write(sc, MTKSWITCH_VTCR, VTCR_BUSY | VTCR_FUNC_VID_READ | (v->es_vlangroup & VTCR_VID_MASK)); while ((val = sc->hal.mtkswitch_read(sc, MTKSWITCH_VTCR)) & VTCR_BUSY); if (val & VTCR_IDX_INVALID) { MTKSWITCH_UNLOCK(sc); return (0); } val = sc->hal.mtkswitch_read(sc, MTKSWITCH_VAWD1); if (val & VAWD1_VALID) v->es_vid |= ETHERSWITCH_VID_VALID; else { MTKSWITCH_UNLOCK(sc); return (0); } v->es_member_ports = (val >> VAWD1_MEMBER_OFF) & VAWD1_MEMBER_MASK; val = sc->hal.mtkswitch_read(sc, MTKSWITCH_VAWD2); for (i = 0; i < sc->info.es_nports; i++) { if ((val & VAWD2_PORT_MASK(i)) == VAWD2_PORT_UNTAGGED(i)) v->es_untagged_ports |= (1<vlan_mode != ETHERSWITCH_VLAN_DOT1Q) || (v->es_vlangroup > sc->info.es_nvlangroups)) return (EINVAL); /* We currently don't support FID */ if (v->es_fid != 0) return (EINVAL); MTKSWITCH_LOCK(sc); while (sc->hal.mtkswitch_read(sc, MTKSWITCH_VTCR) & VTCR_BUSY); if (sc->sc_switchtype == MTK_SWITCH_MT7620) { val = sc->hal.mtkswitch_read(sc, MTKSWITCH_VTIM(v->es_vlangroup)); val &= ~(VTIM_MASK << VTIM_OFF(v->es_vlangroup)); val |= ((v->es_vid & VTIM_MASK) << VTIM_OFF(v->es_vlangroup)); sc->hal.mtkswitch_write(sc, MTKSWITCH_VTIM(v->es_vlangroup), val); vid = v->es_vlangroup; } else vid = v->es_vid; /* We use FID 0 */ val = VAWD1_IVL_MAC | VAWD1_VTAG_EN | VAWD1_VALID; val |= ((v->es_member_ports & VAWD1_MEMBER_MASK) << VAWD1_MEMBER_OFF); sc->hal.mtkswitch_write(sc, MTKSWITCH_VAWD1, val); /* Set tagged ports */ val = 0; for (i = 0; i < sc->info.es_nports; i++) if (((1<es_untagged_ports) == 0) val |= VAWD2_PORT_TAGGED(i); sc->hal.mtkswitch_write(sc, 
MTKSWITCH_VAWD2, val); /* Write the VLAN entry */ sc->hal.mtkswitch_write(sc, MTKSWITCH_VTCR, VTCR_BUSY | VTCR_FUNC_VID_WRITE | (vid & VTCR_VID_MASK)); while ((val = sc->hal.mtkswitch_read(sc, MTKSWITCH_VTCR)) & VTCR_BUSY); MTKSWITCH_UNLOCK(sc); if (val & VTCR_IDX_INVALID) return (EINVAL); return (0); } static int mtkswitch_vlan_get_pvid(struct mtkswitch_softc *sc, int port, int *pvid) { MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED); *pvid = sc->hal.mtkswitch_read(sc, MTKSWITCH_PPBV1(port)); *pvid = PPBV_VID_FROM_REG(*pvid); return (0); } static int mtkswitch_vlan_set_pvid(struct mtkswitch_softc *sc, int port, int pvid) { uint32_t val; MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED); val = PPBV_VID(pvid & PPBV_VID_MASK); sc->hal.mtkswitch_write(sc, MTKSWITCH_PPBV1(port), val); sc->hal.mtkswitch_write(sc, MTKSWITCH_PPBV2(port), val); return (0); } extern void mtk_attach_switch_mt7620(struct mtkswitch_softc *sc) { sc->portmap = 0x7f; sc->phymap = 0x1f; sc->info.es_nports = 7; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; sc->info.es_nvlangroups = 16; sprintf(sc->info.es_name, "Mediatek GSW"); if (sc->sc_switchtype == MTK_SWITCH_MT7621) { sc->hal.mtkswitch_read = mtkswitch_reg_read32_mt7621; sc->hal.mtkswitch_write = mtkswitch_reg_write32_mt7621; sc->info.es_nvlangroups = 4096; } else { sc->hal.mtkswitch_read = mtkswitch_reg_read32; sc->hal.mtkswitch_write = mtkswitch_reg_write32; } sc->hal.mtkswitch_reset = mtkswitch_reset; sc->hal.mtkswitch_hw_setup = mtkswitch_hw_setup; sc->hal.mtkswitch_hw_global_setup = mtkswitch_hw_global_setup; sc->hal.mtkswitch_port_init = mtkswitch_port_init; sc->hal.mtkswitch_get_port_status = mtkswitch_get_port_status; sc->hal.mtkswitch_atu_flush = mtkswitch_atu_flush; sc->hal.mtkswitch_port_vlan_setup = mtkswitch_port_vlan_setup; sc->hal.mtkswitch_port_vlan_get = mtkswitch_port_vlan_get; sc->hal.mtkswitch_vlan_init_hw = mtkswitch_vlan_init_hw; sc->hal.mtkswitch_vlan_getvgroup = mtkswitch_vlan_getvgroup; sc->hal.mtkswitch_vlan_setvgroup = 
mtkswitch_vlan_setvgroup; sc->hal.mtkswitch_vlan_get_pvid = mtkswitch_vlan_get_pvid; sc->hal.mtkswitch_vlan_set_pvid = mtkswitch_vlan_set_pvid; sc->hal.mtkswitch_phy_read = mtkswitch_phy_read; sc->hal.mtkswitch_phy_write = mtkswitch_phy_write; sc->hal.mtkswitch_reg_read = mtkswitch_reg_read; sc->hal.mtkswitch_reg_write = mtkswitch_reg_write; } Index: head/sys/dev/hptnr/hptnr_os_bsd.c =================================================================== --- head/sys/dev/hptnr/hptnr_os_bsd.c (revision 359440) +++ head/sys/dev/hptnr/hptnr_os_bsd.c (revision 359441) @@ -1,304 +1,304 @@ /* $Id: os_bsd.c,v 1.13 2010/05/11 03:12:11 lcn Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005-2011 HighPoint Technologies, Inc. All Rights Reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include BUS_ADDRESS get_dmapool_phy_addr(void *osext, void * dmapool_virt_addr); /* hardware access */ HPT_U8 os_inb (void *port) { return inb((unsigned)(HPT_UPTR)port); } HPT_U16 os_inw (void *port) { return inw((unsigned)(HPT_UPTR)port); } HPT_U32 os_inl (void *port) { return inl((unsigned)(HPT_UPTR)port); } void os_outb (void *port, HPT_U8 value) { outb((unsigned)(HPT_UPTR)port, (value)); } void os_outw (void *port, HPT_U16 value) { outw((unsigned)(HPT_UPTR)port, (value)); } void os_outl (void *port, HPT_U32 value) { outl((unsigned)(HPT_UPTR)port, (value)); } void os_insw (void *port, HPT_U16 *buffer, HPT_U32 count) { insw((unsigned)(HPT_UPTR)port, (void *)buffer, count); } void os_outsw(void *port, HPT_U16 *buffer, HPT_U32 count) { outsw((unsigned)(HPT_UPTR)port, (void *)buffer, count); } HPT_U32 __dummy_reg = 0; /* PCI configuration space */ HPT_U8 os_pci_readb (void *osext, HPT_U8 offset) { return pci_read_config(((PHBA)osext)->pcidev, offset, 1); } HPT_U16 os_pci_readw (void *osext, HPT_U8 offset) { return pci_read_config(((PHBA)osext)->pcidev, offset, 2); } HPT_U32 os_pci_readl (void *osext, HPT_U8 offset) { return pci_read_config(((PHBA)osext)->pcidev, offset, 4); } void os_pci_writeb (void *osext, HPT_U8 offset, HPT_U8 value) { pci_write_config(((PHBA)osext)->pcidev, offset, value, 1); } void os_pci_writew (void *osext, HPT_U8 offset, HPT_U16 value) { pci_write_config(((PHBA)osext)->pcidev, offset, value, 2); } void 
os_pci_writel (void *osext, HPT_U8 offset, HPT_U32 value) { pci_write_config(((PHBA)osext)->pcidev, offset, value, 4); } BUS_ADDRESS get_dmapool_phy_addr(void *osext, void * dmapool_virt_addr) { return (BUS_ADDRESS)vtophys(dmapool_virt_addr); } HPT_U32 pcicfg_read_dword(HPT_U8 bus, HPT_U8 dev, HPT_U8 func, HPT_U8 reg) { - return (HPT_U32)pci_cfgregread(bus, dev, func, reg, 4);; + return (HPT_U32)pci_cfgregread(bus, dev, func, reg, 4); }/* PCI space access */ void *os_map_pci_bar( void *osext, int index, HPT_U32 offset, HPT_U32 length ) { PHBA hba = (PHBA)osext; HPT_U32 base; hba->pcibar[index].rid = 0x10 + index * 4; base = pci_read_config(hba->pcidev, hba->pcibar[index].rid, 4); if (base & 1) { hba->pcibar[index].type = SYS_RES_IOPORT; hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (void *)(unsigned long)(base & ~0x1); } else { hba->pcibar[index].type = SYS_RES_MEMORY; hba->pcibar[index].res = bus_alloc_resource_any(hba->pcidev, hba->pcibar[index].type, &hba->pcibar[index].rid, RF_ACTIVE); hba->pcibar[index].base = (char *)rman_get_virtual(hba->pcibar[index].res) + offset; } return hba->pcibar[index].base; } void os_unmap_pci_bar(void *osext, void *base) { PHBA hba = (PHBA)osext; int index; for (index=0; index<6; index++) { if (hba->pcibar[index].base==base) { bus_release_resource(hba->pcidev, hba->pcibar[index].type, hba->pcibar[index].rid, hba->pcibar[index].res); hba->pcibar[index].base = 0; return; } } } void freelist_reserve(struct freelist *list, void *osext, HPT_UINT size, HPT_UINT count) { PVBUS_EXT vbus_ext = osext; if (vbus_ext->ext_type!=EXT_TYPE_VBUS) vbus_ext = ((PHBA)osext)->vbus_ext; list->next = vbus_ext->freelist_head; vbus_ext->freelist_head = list; list->dma = 0; list->size = size; list->head = 0; #if DBG list->reserved_count = #endif list->count = count; } void *freelist_get(struct freelist *list) { void * result; if (list->count) { 
HPT_ASSERT(list->head); result = list->head; list->head = *(void **)result; list->count--; return result; } return 0; } void freelist_put(struct freelist * list, void *p) { HPT_ASSERT(list->dma==0); list->count++; *(void **)p = list->head; list->head = p; } void freelist_reserve_dma(struct freelist *list, void *osext, HPT_UINT size, HPT_UINT alignment, HPT_UINT count) { PVBUS_EXT vbus_ext = osext; if (vbus_ext->ext_type!=EXT_TYPE_VBUS) vbus_ext = ((PHBA)osext)->vbus_ext; list->next = vbus_ext->freelist_dma_head; vbus_ext->freelist_dma_head = list; list->dma = 1; list->alignment = alignment; list->size = size; list->head = 0; #if DBG list->reserved_count = #endif list->count = count; } void *freelist_get_dma(struct freelist *list, BUS_ADDRESS *busaddr) { void *result; HPT_ASSERT(list->dma); result = freelist_get(list); if (result) *busaddr = *(BUS_ADDRESS *)((void **)result+1); return result; } void freelist_put_dma(struct freelist *list, void *p, BUS_ADDRESS busaddr) { HPT_ASSERT(list->dma); list->count++; *(void **)p = list->head; *(BUS_ADDRESS *)((void **)p+1) = busaddr; list->head = p; } HPT_U32 os_get_stamp(void) { HPT_U32 stamp; do { stamp = random(); } while (stamp==0); return stamp; } void os_stallexec(HPT_U32 microseconds) { DELAY(microseconds); } static void os_timer_for_ldm(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; ldm_on_timer((PVBUS)vbus_ext->vbus); } void os_request_timer(void * osext, HPT_U32 interval) { PVBUS_EXT vbus_ext = osext; HPT_ASSERT(vbus_ext->ext_type==EXT_TYPE_VBUS); callout_reset_sbt(&vbus_ext->timer, SBT_1US * interval, 0, os_timer_for_ldm, vbus_ext, 0); } HPT_TIME os_query_time(void) { return ticks * (1000000 / hz); } void os_schedule_task(void *osext, OSM_TASK *task) { PVBUS_EXT vbus_ext = osext; HPT_ASSERT(task->next==0); if (vbus_ext->tasks==0) vbus_ext->tasks = task; else { OSM_TASK *t = vbus_ext->tasks; while (t->next) t = t->next; t->next = task; } if (vbus_ext->worker.ta_context) TASK_ENQUEUE(&vbus_ext->worker); } int 
os_revalidate_device(void *osext, int id) { return 0; } int os_query_remove_device(void *osext, int id) { return 0; } HPT_U8 os_get_vbus_seq(void *osext) { return ((PVBUS_EXT)osext)->sim->path_id; } int os_printk(char *fmt, ...) { va_list args; static char buf[512]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); return printf("%s: %s\n", driver_name, buf); } #if DBG void os_check_stack(const char *location, int size){} void __os_dbgbreak(const char *file, int line) { printf("*** break at %s:%d ***", file, line); while (1); } int hpt_dbg_level = 1; #endif Index: head/sys/dev/nvme/nvme_qpair.c =================================================================== --- head/sys/dev/nvme/nvme_qpair.c (revision 359440) +++ head/sys/dev/nvme/nvme_qpair.c (revision 359441) @@ -1,1280 +1,1280 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012-2014 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include "nvme_private.h" typedef enum error_print { ERROR_PRINT_NONE, ERROR_PRINT_NO_RETRY, ERROR_PRINT_ALL } error_print_t; #define DO_NOT_RETRY 1 static void _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req); static void nvme_qpair_destroy(struct nvme_qpair *qpair); struct nvme_opcode_string { uint16_t opc; const char * str; }; static struct nvme_opcode_string admin_opcode[] = { { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" }, { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" }, { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" }, { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" }, { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" }, { NVME_OPC_IDENTIFY, "IDENTIFY" }, { NVME_OPC_ABORT, "ABORT" }, { NVME_OPC_SET_FEATURES, "SET FEATURES" }, { NVME_OPC_GET_FEATURES, "GET FEATURES" }, { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" }, { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" }, { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" }, { NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" }, { NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" }, { NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" }, { NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" }, { NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" }, { NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" }, { NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" }, { NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" }, { 
NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" }, { NVME_OPC_FORMAT_NVM, "FORMAT NVM" }, { NVME_OPC_SECURITY_SEND, "SECURITY SEND" }, { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" }, { NVME_OPC_SANITIZE, "SANITIZE" }, { NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" }, { 0xFFFF, "ADMIN COMMAND" } }; static struct nvme_opcode_string io_opcode[] = { { NVME_OPC_FLUSH, "FLUSH" }, { NVME_OPC_WRITE, "WRITE" }, { NVME_OPC_READ, "READ" }, { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" }, { NVME_OPC_COMPARE, "COMPARE" }, { NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" }, { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" }, { NVME_OPC_VERIFY, "VERIFY" }, { NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" }, { NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" }, { NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" }, { NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" }, { 0xFFFF, "IO COMMAND" } }; static const char * get_admin_opcode_string(uint16_t opc) { struct nvme_opcode_string *entry; entry = admin_opcode; while (entry->opc != 0xFFFF) { if (entry->opc == opc) return (entry->str); entry++; } return (entry->str); } static const char * get_io_opcode_string(uint16_t opc) { struct nvme_opcode_string *entry; entry = io_opcode; while (entry->opc != 0xFFFF) { if (entry->opc == opc) return (entry->str); entry++; } return (entry->str); } static void nvme_admin_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) { nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x " "cdw10:%08x cdw11:%08x\n", get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid, le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11)); } static void nvme_io_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) { switch (cmd->opc) { case NVME_OPC_WRITE: case NVME_OPC_READ: case NVME_OPC_WRITE_UNCORRECTABLE: case NVME_OPC_COMPARE: case NVME_OPC_WRITE_ZEROES: case NVME_OPC_VERIFY: nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d " 
"lba:%llu len:%d\n", get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid), ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10), (le32toh(cmd->cdw12) & 0xFFFF) + 1); break; case NVME_OPC_FLUSH: case NVME_OPC_DATASET_MANAGEMENT: case NVME_OPC_RESERVATION_REGISTER: case NVME_OPC_RESERVATION_REPORT: case NVME_OPC_RESERVATION_ACQUIRE: case NVME_OPC_RESERVATION_RELEASE: nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n", get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid)); break; default: nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n", get_io_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid, le32toh(cmd->nsid)); break; } } static void nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) { if (qpair->id == 0) nvme_admin_qpair_print_command(qpair, cmd); else nvme_io_qpair_print_command(qpair, cmd); if (nvme_verbose_cmd_dump) { nvme_printf(qpair->ctrlr, "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n", cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2); nvme_printf(qpair->ctrlr, "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n", cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14, cmd->cdw15); } } struct nvme_status_string { uint16_t sc; const char * str; }; static struct nvme_status_string generic_status[] = { { NVME_SC_SUCCESS, "SUCCESS" }, { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, { NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" 
}, { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" }, { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" }, { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" }, { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" }, { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" }, { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" }, { NVME_SC_PRP_OFFET_INVALID, "PRP OFFET INVALID" }, { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" }, { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" }, { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" }, { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" }, { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" }, { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" }, { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" }, { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" }, { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" }, { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL_DATA_BLOCK_GRANULARITY_INVALID" }, { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" }, { NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" }, { NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" }, { NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" }, { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" }, { 0xFFFF, "GENERIC" } }; static struct nvme_status_string command_specific_status[] = { { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, { 
NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" }, { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" }, { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" }, { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" }, { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" }, { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" }, { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" }, { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" }, { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" }, { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" }, { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" }, { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" }, { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" }, { NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" }, { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" }, { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" }, { NVME_SC_SELT_TEST_IN_PROGRESS, "DEVICE SELT-TEST IN PROGRESS" }, { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" }, { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" }, { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" }, { 
NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" }, { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" }, { NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" }, { NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIED INVALID" }, { NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" }, { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, { 0xFFFF, "COMMAND SPECIFIC" } }; static struct nvme_status_string media_error_status[] = { { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" }, { 0xFFFF, "MEDIA ERROR" } }; static struct nvme_status_string path_related_status[] = { { NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" }, { NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" }, { NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" }, { NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" }, { NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" }, { NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" }, { NVME_SC_COMMAND_ABOTHED_BY_HOST, "COMMAND ABOTHED BY HOST" }, { 0xFFFF, "PATH RELATED" }, }; static const char * get_status_string(uint16_t sct, uint16_t sc) { struct nvme_status_string *entry; switch (sct) { case NVME_SCT_GENERIC: entry = generic_status; break; case NVME_SCT_COMMAND_SPECIFIC: entry = command_specific_status; break; case NVME_SCT_MEDIA_ERROR: entry = 
media_error_status; break; case NVME_SCT_PATH_RELATED: entry = path_related_status; break; case NVME_SCT_VENDOR_SPECIFIC: return ("VENDOR SPECIFIC"); default: return ("RESERVED"); } while (entry->sc != 0xFFFF) { if (entry->sc == sc) return (entry->str); entry++; } return (entry->str); } static void nvme_qpair_print_completion(struct nvme_qpair *qpair, struct nvme_completion *cpl) { uint16_t sct, sc; sct = NVME_STATUS_GET_SCT(cpl->status); sc = NVME_STATUS_GET_SC(cpl->status); nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n", get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid, cpl->cdw0); } static bool nvme_completion_is_retry(const struct nvme_completion *cpl) { uint8_t sct, sc, dnr; sct = NVME_STATUS_GET_SCT(cpl->status); sc = NVME_STATUS_GET_SC(cpl->status); dnr = NVME_STATUS_GET_DNR(cpl->status); /* Do Not Retry Bit */ /* * TODO: spec is not clear how commands that are aborted due * to TLER will be marked. So for now, it seems * NAMESPACE_NOT_READY is the only case where we should * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST * set the DNR bit correctly since the driver controls that. 
*/ switch (sct) { case NVME_SCT_GENERIC: switch (sc) { case NVME_SC_ABORTED_BY_REQUEST: case NVME_SC_NAMESPACE_NOT_READY: if (dnr) return (0); else return (1); case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: case NVME_SC_COMMAND_ID_CONFLICT: case NVME_SC_DATA_TRANSFER_ERROR: case NVME_SC_ABORTED_POWER_LOSS: case NVME_SC_INTERNAL_DEVICE_ERROR: case NVME_SC_ABORTED_SQ_DELETION: case NVME_SC_ABORTED_FAILED_FUSED: case NVME_SC_ABORTED_MISSING_FUSED: case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: case NVME_SC_COMMAND_SEQUENCE_ERROR: case NVME_SC_LBA_OUT_OF_RANGE: case NVME_SC_CAPACITY_EXCEEDED: default: return (0); } case NVME_SCT_COMMAND_SPECIFIC: case NVME_SCT_MEDIA_ERROR: return (0); case NVME_SCT_PATH_RELATED: switch (sc) { case NVME_SC_INTERNAL_PATH_ERROR: if (dnr) return (0); else return (1); default: return (0); } case NVME_SCT_VENDOR_SPECIFIC: default: return (0); } } static void nvme_qpair_complete_tracker(struct nvme_tracker *tr, struct nvme_completion *cpl, error_print_t print_on_error) { struct nvme_qpair * qpair = tr->qpair; struct nvme_request *req; bool retry, error, retriable; req = tr->req; error = nvme_completion_is_error(cpl); retriable = nvme_completion_is_retry(cpl); retry = error && retriable && req->retries < nvme_retry_count; if (retry) qpair->num_retries++; if (error && req->retries >= nvme_retry_count && retriable) qpair->num_failures++; if (error && (print_on_error == ERROR_PRINT_ALL || (!retry && print_on_error == ERROR_PRINT_NO_RETRY))) { nvme_qpair_print_command(qpair, &req->cmd); nvme_qpair_print_completion(qpair, cpl); } qpair->act_tr[cpl->cid] = NULL; KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n")); if (!retry) { if (req->type != NVME_REQUEST_NULL) { bus_dmamap_sync(qpair->dma_tag_payload, tr->payload_dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } if (req->cb_fn) req->cb_fn(req->cb_arg, cpl); } mtx_lock(&qpair->lock); callout_stop(&tr->timer); if (retry) { req->retries++; 
nvme_qpair_submit_tracker(qpair, tr); } else { if (req->type != NVME_REQUEST_NULL) { bus_dmamap_unload(qpair->dma_tag_payload, tr->payload_dma_map); } nvme_free_request(req); tr->req = NULL; TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq); TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); /* * If the controller is in the middle of resetting, don't * try to submit queued requests here - let the reset logic * handle that instead. */ if (!STAILQ_EMPTY(&qpair->queued_req) && !qpair->ctrlr->is_resetting) { req = STAILQ_FIRST(&qpair->queued_req); STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); _nvme_qpair_submit_request(qpair, req); } } mtx_unlock(&qpair->lock); } static void nvme_qpair_manual_complete_tracker( struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr, error_print_t print_on_error) { struct nvme_completion cpl; memset(&cpl, 0, sizeof(cpl)); struct nvme_qpair * qpair = tr->qpair; cpl.sqid = qpair->id; cpl.cid = tr->cid; cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT; cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT; cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT; nvme_qpair_complete_tracker(tr, &cpl, print_on_error); } void nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, struct nvme_request *req, uint32_t sct, uint32_t sc) { struct nvme_completion cpl; bool error; memset(&cpl, 0, sizeof(cpl)); cpl.sqid = qpair->id; cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT; cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT; error = nvme_completion_is_error(&cpl); if (error) { nvme_qpair_print_command(qpair, &req->cmd); nvme_qpair_print_completion(qpair, &cpl); } if (req->cb_fn) req->cb_fn(req->cb_arg, &cpl); nvme_free_request(req); } bool nvme_qpair_process_completions(struct nvme_qpair *qpair) { struct nvme_tracker *tr; struct nvme_completion cpl; int done = 0; bool in_panic = dumping || SCHEDULER_STOPPED(); qpair->num_intr_handler_calls++; /* * qpair is 
not enabled, likely because a controller reset is is in * progress. Ignore the interrupt - any I/O that was associated with * this interrupt will get retried when the reset is complete. */ if (!qpair->is_enabled) return (false); /* * A panic can stop the CPU this routine is running on at any point. If * we're called during a panic, complete the sq_head wrap protocol for * the case where we are interrupted just after the increment at 1 * below, but before we can reset cq_head to zero at 2. Also cope with * the case where we do the zero at 2, but may or may not have done the * phase adjustment at step 3. The panic machinery flushes all pending * memory writes, so we can make these strong ordering assumptions * that would otherwise be unwise if we were racing in real time. */ if (__predict_false(in_panic)) { if (qpair->cq_head == qpair->num_entries) { /* * Here we know that we need to zero cq_head and then negate * the phase, which hasn't been assigned if cq_head isn't * zero due to the atomic_store_rel. */ qpair->cq_head = 0; qpair->phase = !qpair->phase; } else if (qpair->cq_head == 0) { /* * In this case, we know that the assignment at 2 * happened below, but we don't know if it 3 happened or * not. To do this, we look at the last completion * entry and set the phase to the opposite phase * that it has. This gets us back in sync */ cpl = qpair->cpl[qpair->num_entries - 1]; nvme_completion_swapbytes(&cpl); qpair->phase = !NVME_STATUS_GET_P(cpl.status); } } bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); while (1) { cpl = qpair->cpl[qpair->cq_head]; /* Convert to host endian */ nvme_completion_swapbytes(&cpl); if (NVME_STATUS_GET_P(cpl.status) != qpair->phase) break; tr = qpair->act_tr[cpl.cid]; if (tr != NULL) { nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL); qpair->sq_head = cpl.sqhd; done++; } else if (!in_panic) { /* * A missing tracker is normally an error. 
However, a * panic can stop the CPU this routine is running on * after completing an I/O but before updating * qpair->cq_head at 1 below. Later, we re-enter this * routine to poll I/O associated with the kernel * dump. We find that the tr has been set to null before * calling the completion routine. If it hasn't * completed (or it triggers a panic), then '1' below * won't have updated cq_head. Rather than panic again, * ignore this condition because it's not unexpected. */ nvme_printf(qpair->ctrlr, "cpl does not map to outstanding cmd\n"); /* nvme_dump_completion expects device endianess */ nvme_dump_completion(&qpair->cpl[qpair->cq_head]); KASSERT(0, ("received completion for unknown cmd")); } /* * There's a number of races with the following (see above) when * the system panics. We compensate for each one of them by * using the atomic store to force strong ordering (at least when * viewed in the aftermath of a panic). */ if (++qpair->cq_head == qpair->num_entries) { /* 1 */ atomic_store_rel_int(&qpair->cq_head, 0); /* 2 */ qpair->phase = !qpair->phase; /* 3 */ } bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle, qpair->cq_hdbl_off, qpair->cq_head); } return (done != 0); } static void nvme_qpair_msix_handler(void *arg) { struct nvme_qpair *qpair = arg; nvme_qpair_process_completions(qpair); } int nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t num_entries, uint32_t num_trackers, struct nvme_controller *ctrlr) { struct nvme_tracker *tr; size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz; uint64_t queuemem_phys, prpmem_phys, list_phys; uint8_t *queuemem, *prpmem, *prp_list; int i, err; qpair->vector = ctrlr->msix_enabled ? qpair->id : 0; qpair->num_entries = num_entries; qpair->num_trackers = num_trackers; qpair->ctrlr = ctrlr; if (ctrlr->msix_enabled) { /* * MSI-X vector resource IDs start at 1, so we add one to * the queue's vector to get the corresponding rid to use. 
*/ qpair->rid = qpair->vector + 1; qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, &qpair->rid, RF_ACTIVE); bus_setup_intr(ctrlr->dev, qpair->res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_qpair_msix_handler, qpair, &qpair->tag); if (qpair->id == 0) { bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, "admin"); } else { bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag, "io%d", qpair->id - 1); } } mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF); /* Note: NVMe PRP format is restricted to 4-byte alignment. */ err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 4, PAGE_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE, (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0, NULL, NULL, &qpair->dma_tag_payload); if (err != 0) { nvme_printf(ctrlr, "payload tag create failed %d\n", err); goto out; } /* * Each component must be page aligned, and individual PRP lists * cannot cross a page boundary. */ cmdsz = qpair->num_entries * sizeof(struct nvme_command); cmdsz = roundup2(cmdsz, PAGE_SIZE); cplsz = qpair->num_entries * sizeof(struct nvme_completion); cplsz = roundup2(cplsz, PAGE_SIZE); - prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;; + prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES; prpmemsz = qpair->num_trackers * prpsz; allocsz = cmdsz + cplsz + prpmemsz; err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag); if (err != 0) { nvme_printf(ctrlr, "tag create failed %d\n", err); goto out; } bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain); if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem, BUS_DMA_NOWAIT, &qpair->queuemem_map)) { nvme_printf(ctrlr, "failed to alloc qpair memory\n"); goto out; } if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) { nvme_printf(ctrlr, "failed to load qpair memory\n"); goto out; } 
qpair->num_cmds = 0; qpair->num_intr_handler_calls = 0; qpair->num_retries = 0; qpair->num_failures = 0; qpair->cmd = (struct nvme_command *)queuemem; qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz); prpmem = (uint8_t *)(queuemem + cmdsz + cplsz); qpair->cmd_bus_addr = queuemem_phys; qpair->cpl_bus_addr = queuemem_phys + cmdsz; prpmem_phys = queuemem_phys + cmdsz + cplsz; /* * Calculate the stride of the doorbell register. Many emulators set this * value to correspond to a cache line. However, some hardware has set * it to various small values. */ qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) + (qpair->id << (ctrlr->dstrd + 1)); qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) + (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd); TAILQ_INIT(&qpair->free_tr); TAILQ_INIT(&qpair->outstanding_tr); STAILQ_INIT(&qpair->queued_req); list_phys = prpmem_phys; prp_list = prpmem; for (i = 0; i < qpair->num_trackers; i++) { if (list_phys + prpsz > prpmem_phys + prpmemsz) { qpair->num_trackers = i; break; } /* * Make sure that the PRP list for this tracker doesn't * overflow to another page. 
*/ if (trunc_page(list_phys) != trunc_page(list_phys + prpsz - 1)) { list_phys = roundup2(list_phys, PAGE_SIZE); prp_list = (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE); } tr = malloc_domainset(sizeof(*tr), M_NVME, DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK); bus_dmamap_create(qpair->dma_tag_payload, 0, &tr->payload_dma_map); callout_init(&tr->timer, 1); tr->cid = i; tr->qpair = qpair; tr->prp = (uint64_t *)prp_list; tr->prp_bus_addr = list_phys; TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq); list_phys += prpsz; prp_list += prpsz; } if (qpair->num_trackers == 0) { nvme_printf(ctrlr, "failed to allocate enough trackers\n"); goto out; } qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) * qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK); return (0); out: nvme_qpair_destroy(qpair); return (ENOMEM); } static void nvme_qpair_destroy(struct nvme_qpair *qpair) { struct nvme_tracker *tr; if (qpair->tag) bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag); if (mtx_initialized(&qpair->lock)) mtx_destroy(&qpair->lock); if (qpair->res) bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ, rman_get_rid(qpair->res), qpair->res); if (qpair->cmd != NULL) { bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map); bus_dmamem_free(qpair->dma_tag, qpair->cmd, qpair->queuemem_map); } if (qpair->act_tr) free_domain(qpair->act_tr, M_NVME); while (!TAILQ_EMPTY(&qpair->free_tr)) { tr = TAILQ_FIRST(&qpair->free_tr); TAILQ_REMOVE(&qpair->free_tr, tr, tailq); bus_dmamap_destroy(qpair->dma_tag_payload, tr->payload_dma_map); free_domain(tr, M_NVME); } if (qpair->dma_tag) bus_dma_tag_destroy(qpair->dma_tag); if (qpair->dma_tag_payload) bus_dma_tag_destroy(qpair->dma_tag_payload); } static void nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair) { struct nvme_tracker *tr; tr = TAILQ_FIRST(&qpair->outstanding_tr); while (tr != NULL) { if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) { nvme_qpair_manual_complete_tracker(tr, 
NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0, ERROR_PRINT_NONE); tr = TAILQ_FIRST(&qpair->outstanding_tr); } else { tr = TAILQ_NEXT(tr, tailq); } } } void nvme_admin_qpair_destroy(struct nvme_qpair *qpair) { nvme_admin_qpair_abort_aers(qpair); nvme_qpair_destroy(qpair); } void nvme_io_qpair_destroy(struct nvme_qpair *qpair) { nvme_qpair_destroy(qpair); } static void nvme_abort_complete(void *arg, const struct nvme_completion *status) { struct nvme_tracker *tr = arg; /* * If cdw0 == 1, the controller was not able to abort the command * we requested. We still need to check the active tracker array, * to cover race where I/O timed out at same time controller was * completing the I/O. */ if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) { /* * An I/O has timed out, and the controller was unable to * abort it for some reason. Construct a fake completion * status, and then complete the I/O's tracker manually. */ nvme_printf(tr->qpair->ctrlr, "abort command failed, aborting command manually\n"); nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL); } } static void nvme_timeout(void *arg) { struct nvme_tracker *tr = arg; struct nvme_qpair *qpair = tr->qpair; struct nvme_controller *ctrlr = qpair->ctrlr; uint32_t csts; uint8_t cfs; /* * Read csts to get value of cfs - controller fatal status. * If no fatal status, try to call the completion routine, and * if completes transactions, report a missed interrupt and * return (this may need to be rate limited). Otherwise, if * aborts are enabled and the controller is not reporting * fatal status, abort the command. Otherwise, just reset the * controller and hope for the best. 
*/ csts = nvme_mmio_read_4(ctrlr, csts); cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK; if (cfs == 0 && nvme_qpair_process_completions(qpair)) { nvme_printf(ctrlr, "Missing interrupt\n"); return; } if (ctrlr->enable_aborts && cfs == 0) { nvme_printf(ctrlr, "Aborting command due to a timeout.\n"); nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id, nvme_abort_complete, tr); } else { nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n", (csts == 0xffffffff) ? " and possible hot unplug" : (cfs ? " and fatal error status" : "")); nvme_ctrlr_reset(ctrlr); } } void nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr) { struct nvme_request *req; struct nvme_controller *ctrlr; mtx_assert(&qpair->lock, MA_OWNED); req = tr->req; req->cmd.cid = tr->cid; qpair->act_tr[tr->cid] = tr; ctrlr = qpair->ctrlr; if (req->timeout) callout_reset_on(&tr->timer, ctrlr->timeout_period * hz, nvme_timeout, tr, qpair->cpu); /* Copy the command from the tracker to the submission queue. */ memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd)); if (++qpair->sq_tail == qpair->num_entries) qpair->sq_tail = 0; bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); #ifndef __powerpc__ /* * powerpc's bus_dmamap_sync() already includes a heavyweight sync, but * no other archs do. */ wmb(); #endif bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle, qpair->sq_tdbl_off, qpair->sq_tail); qpair->num_cmds++; } static void nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { struct nvme_tracker *tr = arg; uint32_t cur_nseg; /* * If the mapping operation failed, return immediately. The caller * is responsible for detecting the error status and failing the * tracker manually. 
*/ if (error != 0) { nvme_printf(tr->qpair->ctrlr, "nvme_payload_map err %d\n", error); return; } /* * Note that we specified PAGE_SIZE for alignment and max * segment size when creating the bus dma tags. So here * we can safely just transfer each segment to its * associated PRP entry. */ tr->req->cmd.prp1 = htole64(seg[0].ds_addr); if (nseg == 2) { tr->req->cmd.prp2 = htole64(seg[1].ds_addr); } else if (nseg > 2) { cur_nseg = 1; tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr); while (cur_nseg < nseg) { tr->prp[cur_nseg-1] = htole64((uint64_t)seg[cur_nseg].ds_addr); cur_nseg++; } } else { /* * prp2 should not be used by the controller * since there is only one segment, but set * to 0 just to be safe. */ tr->req->cmd.prp2 = 0; } bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); nvme_qpair_submit_tracker(tr->qpair, tr); } static void _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) { struct nvme_tracker *tr; int err = 0; mtx_assert(&qpair->lock, MA_OWNED); tr = TAILQ_FIRST(&qpair->free_tr); req->qpair = qpair; if (tr == NULL || !qpair->is_enabled) { /* * No tracker is available, or the qpair is disabled due to * an in-progress controller-level reset or controller * failure. */ if (qpair->ctrlr->is_failed) { /* * The controller has failed. Post the request to a * task where it will be aborted, so that we do not * invoke the request's callback in the context * of the submission. */ nvme_ctrlr_post_failed_request(qpair->ctrlr, req); } else { /* * Put the request on the qpair's request queue to be * processed when a tracker frees up via a command * completion or when the controller reset is * completed. 
*/ STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq); } return; } TAILQ_REMOVE(&qpair->free_tr, tr, tailq); TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq); tr->req = req; switch (req->type) { case NVME_REQUEST_VADDR: KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size, ("payload_size (%d) exceeds max_xfer_size (%d)\n", req->payload_size, qpair->ctrlr->max_xfer_size)); err = bus_dmamap_load(tr->qpair->dma_tag_payload, tr->payload_dma_map, req->u.payload, req->payload_size, nvme_payload_map, tr, 0); if (err != 0) nvme_printf(qpair->ctrlr, "bus_dmamap_load returned 0x%x!\n", err); break; case NVME_REQUEST_NULL: nvme_qpair_submit_tracker(tr->qpair, tr); break; case NVME_REQUEST_BIO: KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size, ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n", (intmax_t)req->u.bio->bio_bcount, qpair->ctrlr->max_xfer_size)); err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload, tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0); if (err != 0) nvme_printf(qpair->ctrlr, "bus_dmamap_load_bio returned 0x%x!\n", err); break; case NVME_REQUEST_CCB: err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload, tr->payload_dma_map, req->u.payload, nvme_payload_map, tr, 0); if (err != 0) nvme_printf(qpair->ctrlr, "bus_dmamap_load_ccb returned 0x%x!\n", err); break; default: panic("unknown nvme request type 0x%x\n", req->type); break; } if (err != 0) { /* * The dmamap operation failed, so we manually fail the * tracker here with DATA_TRANSFER_ERROR status. * * nvme_qpair_manual_complete_tracker must not be called * with the qpair lock held. 
mtx_unlock(&qpair->lock); nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL); mtx_lock(&qpair->lock); } } void nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) { mtx_lock(&qpair->lock); _nvme_qpair_submit_request(qpair, req); mtx_unlock(&qpair->lock); } static void nvme_qpair_enable(struct nvme_qpair *qpair) { qpair->is_enabled = true; } void nvme_qpair_reset(struct nvme_qpair *qpair) { qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0; /* * First time through the completion queue, HW will set phase * bit on completions to 1. So set this to 1 here, indicating * we're looking for a 1 to know which entries have completed. * We'll toggle the bit each time when the completion queue * rolls over. */ qpair->phase = 1; memset(qpair->cmd, 0, qpair->num_entries * sizeof(struct nvme_command)); memset(qpair->cpl, 0, qpair->num_entries * sizeof(struct nvme_completion)); } void nvme_admin_qpair_enable(struct nvme_qpair *qpair) { struct nvme_tracker *tr; struct nvme_tracker *tr_temp; /* * Manually abort each outstanding admin command. Do not retry * admin commands found here, since they will be left over from * a controller reset and it's likely the context in which the * command was issued no longer applies. */ TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { nvme_printf(qpair->ctrlr, "aborting outstanding admin command\n"); nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL); } nvme_qpair_enable(qpair); } void nvme_io_qpair_enable(struct nvme_qpair *qpair) { STAILQ_HEAD(, nvme_request) temp; struct nvme_tracker *tr; struct nvme_tracker *tr_temp; struct nvme_request *req; /* * Manually abort each outstanding I/O. This normally results in a * retry, unless the retry count on the associated request has * reached its limit. 
*/ TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) { nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n"); nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY); } mtx_lock(&qpair->lock); nvme_qpair_enable(qpair); STAILQ_INIT(&temp); STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request); while (!STAILQ_EMPTY(&temp)) { req = STAILQ_FIRST(&temp); STAILQ_REMOVE_HEAD(&temp, stailq); nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n"); nvme_qpair_print_command(qpair, &req->cmd); _nvme_qpair_submit_request(qpair, req); } mtx_unlock(&qpair->lock); } static void nvme_qpair_disable(struct nvme_qpair *qpair) { struct nvme_tracker *tr; qpair->is_enabled = false; mtx_lock(&qpair->lock); TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) callout_stop(&tr->timer); mtx_unlock(&qpair->lock); } void nvme_admin_qpair_disable(struct nvme_qpair *qpair) { nvme_qpair_disable(qpair); nvme_admin_qpair_abort_aers(qpair); } void nvme_io_qpair_disable(struct nvme_qpair *qpair) { nvme_qpair_disable(qpair); } void nvme_qpair_fail(struct nvme_qpair *qpair) { struct nvme_tracker *tr; struct nvme_request *req; if (!mtx_initialized(&qpair->lock)) return; mtx_lock(&qpair->lock); while (!STAILQ_EMPTY(&qpair->queued_req)) { req = STAILQ_FIRST(&qpair->queued_req); STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); nvme_printf(qpair->ctrlr, "failing queued i/o\n"); mtx_unlock(&qpair->lock); nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST); mtx_lock(&qpair->lock); } /* Manually abort each outstanding I/O. */ while (!TAILQ_EMPTY(&qpair->outstanding_tr)) { tr = TAILQ_FIRST(&qpair->outstanding_tr); /* * Do not remove the tracker. The abort_tracker path will * do that for us. 
*/ nvme_printf(qpair->ctrlr, "failing outstanding i/o\n"); mtx_unlock(&qpair->lock); nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL); mtx_lock(&qpair->lock); } mtx_unlock(&qpair->lock); } Index: head/sys/dev/ocs_fc/ocs_hw_queues.c =================================================================== --- head/sys/dev/ocs_fc/ocs_hw_queues.c (revision 359440) +++ head/sys/dev/ocs_fc/ocs_hw_queues.c (revision 359441) @@ -1,2613 +1,2613 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * */ #include "ocs_os.h" #include "ocs_hw.h" #include "ocs_hw_queues.h" #define HW_QTOP_DEBUG 0 /** * @brief Initialize queues * * Given the parsed queue topology spec, the SLI queues are created and * initialized * * @param hw pointer to HW object * @param qtop pointer to queue topology * * @return returns 0 for success, an error code value for failure. */ ocs_hw_rtn_e ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop) { uint32_t i, j; uint32_t default_lengths[QTOP_LAST], len; uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0; uint8_t rqset_filter_mask = 0; hw_eq_t *eqs[hw->config.n_rq]; hw_cq_t *cqs[hw->config.n_rq]; hw_rq_t *rqs[hw->config.n_rq]; ocs_hw_qtop_entry_t *qt, *next_qt; ocs_hw_mrq_t mrq; bool use_mrq = FALSE; hw_eq_t *eq = NULL; hw_cq_t *cq = NULL; hw_wq_t *wq = NULL; hw_rq_t *rq = NULL; hw_mq_t *mq = NULL; mrq.num_pairs = 0; default_lengths[QTOP_EQ] = 1024; default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ]; default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ]; default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ]; default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH; ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG); hw->eq_count = 0; hw->cq_count = 0; hw->mq_count = 0; hw->wq_count = 0; hw->rq_count = 0; hw->hw_rq_count = 0; ocs_list_init(&hw->eq_list, hw_eq_t, link); /* If MRQ is requested, Check if it is supported by SLI. 
*/ if ((hw->config.n_rq > 1 ) && !hw->sli.config.features.flag.mrqp) { ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n"); return OCS_HW_RTN_ERROR; } if (hw->config.n_rq > 1) use_mrq = TRUE; /* Allocate class WQ pools */ for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) { hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ); if (hw->wq_class_array[i] == NULL) { ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n"); return OCS_HW_RTN_NO_MEMORY; } } /* Allocate per CPU WQ pools */ for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) { hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ); if (hw->wq_cpu_array[i] == NULL) { ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n"); return OCS_HW_RTN_NO_MEMORY; } } ocs_hw_assert(qtop != NULL); for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) { if (i == qtop->inuse_count - 1) next_qt = NULL; else next_qt = qt + 1; switch(qt->entry) { case QTOP_EQ: len = (qt->len) ? qt->len : default_lengths[QTOP_EQ]; if (qt->set_default) { default_lengths[QTOP_EQ] = len; break; } eq = hw_new_eq(hw, len); if (eq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } break; case QTOP_CQ: len = (qt->len) ? qt->len : default_lengths[QTOP_CQ]; if (qt->set_default) { default_lengths[QTOP_CQ] = len; break; } if (!eq || !next_qt) { goto fail; } /* If this CQ is for MRQ, then delay the creation */ if (!use_mrq || next_qt->entry != QTOP_RQ) { cq = hw_new_cq(eq, len); if (cq == NULL) { goto fail; } } break; case QTOP_WQ: { len = (qt->len) ? 
qt->len : default_lengths[QTOP_WQ]; if (qt->set_default) { default_lengths[QTOP_WQ] = len; break; } if ((hw->ulp_start + qt->ulp) > hw->ulp_max) { ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp); hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } if (cq == NULL) goto fail; wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp); if (wq == NULL) { goto fail; } /* Place this WQ on the EQ WQ array */ if (ocs_varray_add(eq->wq_array, wq)) { ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* Place this WQ on the HW class array */ if (qt->class < ARRAY_SIZE(hw->wq_class_array)) { if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) { ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } } else { ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* * Place this WQ on the per CPU list, assuming that EQs are mapped to cpu given * by the EQ instance modulo number of CPUs */ if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) { ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } break; } case QTOP_RQ: { len = (qt->len) ? 
qt->len : default_lengths[QTOP_RQ]; if (qt->set_default) { default_lengths[QTOP_RQ] = len; break; } if ((hw->ulp_start + qt->ulp) > hw->ulp_max) { ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp); hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } if (use_mrq) { mrq.rq_cfg[mrq.num_pairs].len = len; mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp; mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask; mrq.rq_cfg[mrq.num_pairs].eq = eq; mrq.num_pairs ++; } else { rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp); if (rq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } rq->filter_mask = qt->filter_mask; } break; } case QTOP_MQ: len = (qt->len) ? qt->len : default_lengths[QTOP_MQ]; if (qt->set_default) { default_lengths[QTOP_MQ] = len; break; } if (cq == NULL) goto fail; mq = hw_new_mq(cq, len); if (mq == NULL) { goto fail; } break; default: ocs_hw_assert(0); break; } } if (mrq.num_pairs) { /* First create normal RQs. */ for (i = 0; i < mrq.num_pairs; i++) { for (j = 0; j < mrq.num_pairs; j++) { if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) { /* This should be created using set */ if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) { ocs_log_crit(hw->os, "Cant create morethan one RQ Set\n"); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } else if (!rqset_filter_mask){ rqset_filter_mask = mrq.rq_cfg[i].filter_mask; rqset_len = mrq.rq_cfg[i].len; rqset_ulp = mrq.rq_cfg[i].ulp; } eqs[rqset_count] = mrq.rq_cfg[i].eq; rqset_count++; break; } } if (j == mrq.num_pairs) { /* Normal RQ */ cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]); if (cq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp); if (rq == NULL) { hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } rq->filter_mask = mrq.rq_cfg[i].filter_mask; } } /* Now create RQ Set */ if (rqset_count) { if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) { 
ocs_log_crit(hw->os, "Max Supported MRQ pairs = %d\n", OCE_HW_MAX_NUM_MRQ_PAIRS); hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* Create CQ set */ if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) { hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } /* Create RQ set */ if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) { hw_queue_teardown(hw); return OCS_HW_RTN_ERROR; } for (i = 0; i < rqset_count ; i++) { rqs[i]->filter_mask = rqset_filter_mask; rqs[i]->is_mrq = TRUE; rqs[i]->base_mrq_id = rqs[0]->hdr->id; } hw->hw_mrq_count = rqset_count; } } return OCS_HW_RTN_SUCCESS; fail: hw_queue_teardown(hw); return OCS_HW_RTN_NO_MEMORY; } /** * @brief Allocate a new EQ object * * A new EQ object is instantiated * * @param hw pointer to HW object * @param entry_count number of entries in the EQ * * @return pointer to allocated EQ object */ hw_eq_t* hw_new_eq(ocs_hw_t *hw, uint32_t entry_count) { hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT); if (eq != NULL) { eq->type = SLI_QTYPE_EQ; eq->hw = hw; eq->entry_count = entry_count; eq->instance = hw->eq_count++; eq->queue = &hw->eq[eq->instance]; ocs_list_init(&eq->cq_list, hw_cq_t, link); eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ); if (eq->wq_array == NULL) { ocs_free(hw->os, eq, sizeof(*eq)); eq = NULL; } else { if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) { ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance); ocs_free(hw->os, eq, sizeof(*eq)); eq = NULL; } else { sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8); hw->hw_eq[eq->instance] = eq; ocs_list_add_tail(&hw->eq_list, eq); ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id, eq->entry_count); } } } return eq; } /** * @brief Allocate a new CQ object * * A new CQ object is instantiated * * @param eq pointer to parent EQ object * @param entry_count number of entries in the CQ * * @return pointer to allocated CQ object 
*/ hw_cq_t* hw_new_cq(hw_eq_t *eq, uint32_t entry_count) { ocs_hw_t *hw = eq->hw; hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT); if (cq != NULL) { cq->eq = eq; cq->type = SLI_QTYPE_CQ; cq->instance = eq->hw->cq_count++; cq->entry_count = entry_count; cq->queue = &hw->cq[cq->instance]; ocs_list_init(&cq->q_list, hw_q_t, link); if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) { ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n", eq->instance, eq->entry_count); ocs_free(hw->os, cq, sizeof(*cq)); cq = NULL; } else { hw->hw_cq[cq->instance] = cq; ocs_list_add_tail(&eq->cq_list, cq); ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id, cq->entry_count); } } return cq; } /** * @brief Allocate a new CQ Set of objects. * * @param eqs pointer to a set of EQ objects. * @param cqs pointer to a set of CQ objects to be returned. * @param num_cqs number of CQ queues in the set. * @param entry_count number of entries in the CQ. * * @return 0 on success and -1 on failure. */ uint32_t hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count) { uint32_t i; ocs_hw_t *hw = eqs[0]->hw; sli4_t *sli4 = &hw->sli; hw_cq_t *cq = NULL; sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT]; /* Initialise CQS pointers to NULL */ for (i = 0; i < num_cqs; i++) { cqs[i] = NULL; } for (i = 0; i < num_cqs; i++) { cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT); if (cq == NULL) goto error; cqs[i] = cq; cq->eq = eqs[i]; cq->type = SLI_QTYPE_CQ; cq->instance = hw->cq_count++; cq->entry_count = entry_count; cq->queue = &hw->cq[cq->instance]; qs[i] = cq->queue; assocs[i] = eqs[i]->queue; ocs_list_init(&cq->q_list, hw_q_t, link); } if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) { ocs_log_err(NULL, "Failed to create CQ Set. 
\n"); goto error; } for (i = 0; i < num_cqs; i++) { hw->hw_cq[cqs[i]->instance] = cqs[i]; ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]); } return 0; error: for (i = 0; i < num_cqs; i++) { if (cqs[i]) { ocs_free(hw->os, cqs[i], sizeof(*cqs[i])); cqs[i] = NULL; } } return -1; } /** * @brief Allocate a new MQ object * * A new MQ object is instantiated * * @param cq pointer to parent CQ object * @param entry_count number of entries in the MQ * * @return pointer to allocated MQ object */ hw_mq_t* hw_new_mq(hw_cq_t *cq, uint32_t entry_count) { ocs_hw_t *hw = cq->eq->hw; hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT); if (mq != NULL) { mq->cq = cq; mq->type = SLI_QTYPE_MQ; mq->instance = cq->eq->hw->mq_count++; mq->entry_count = entry_count; mq->entry_size = OCS_HW_MQ_DEPTH; mq->queue = &hw->mq[mq->instance]; if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ, mq->queue, mq->entry_size, cq->queue, 0)) { ocs_log_err(hw->os, "MQ allocation failure\n"); ocs_free(hw->os, mq, sizeof(*mq)); mq = NULL; } else { hw->hw_mq[mq->instance] = mq; ocs_list_add_tail(&cq->q_list, mq); ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id, mq->entry_count); } } return mq; } /** * @brief Allocate a new WQ object * * A new WQ object is instantiated * * @param cq pointer to parent CQ object * @param entry_count number of entries in the WQ * @param class WQ class * @param ulp index of chute * * @return pointer to allocated WQ object */ hw_wq_t* hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp) { ocs_hw_t *hw = cq->eq->hw; hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT); if (wq != NULL) { wq->hw = cq->eq->hw; wq->cq = cq; wq->type = SLI_QTYPE_WQ; wq->instance = cq->eq->hw->wq_count++; wq->entry_count = entry_count; wq->queue = &hw->wq[wq->instance]; wq->ulp = ulp; wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT; wq->wqec_count = wq->wqec_set_count; wq->free_count = wq->entry_count - 1; wq->class = 
class;
	ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);

	if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
		ocs_log_err(hw->os, "WQ allocation failure\n");
		ocs_free(hw->os, wq, sizeof(*wq));
		wq = NULL;
	} else {
		/* Register the new WQ and link it under its parent CQ */
		hw->hw_wq[wq->instance] = wq;
		ocs_list_add_tail(&cq->q_list, wq);
		ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
			wq->entry_count, wq->class, wq->ulp);
	}
	}
	return wq;
}

/**
 * @brief Allocate a hw_rq_t object
 *
 * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair):
 * one header RQ and one default data RQ, both completing on the given CQ.
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the RQs (clamped to the HW max
 *        and OCS_HW_RQ_NUM_HDR)
 * @param ulp ULP index for this RQ
 *
 * @return pointer to newly allocated hw_rq_t, or NULL on failure
 */
hw_rq_t*
hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
{
	ocs_hw_t *hw = cq->eq->hw;
	hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
	uint32_t max_hw_rq;

	ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

	if (rq != NULL) {
		/* NOTE(review): hw_rq_count is bumped before the allocations
		 * below can still fail; error paths do not roll it back —
		 * confirm callers treat such a failure as fatal. */
		rq->instance = hw->hw_rq_count++;
		rq->cq = cq;
		rq->type = SLI_QTYPE_RQ;
		rq->ulp = ulp;

		/* Clamp the requested depth to the device limit */
		rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

		/* Create the header RQ */
		ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
		rq->hdr = &hw->rq[hw->rq_count];
		rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;

		if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
				    rq->entry_count,
				    rq->hdr_entry_size,
				    cq->queue, ulp, TRUE)) {
			ocs_log_err(hw->os, "RQ allocation failure - header\n");
			ocs_free(hw->os, rq, sizeof(*rq));
			return NULL;
		}

		hw->hw_rq_lookup[hw->rq_count] = rq->instance;	/* Update hw_rq_lookup[] */
		hw->rq_count++;
		ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr size %4d ulp %d\n",
			rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);

		/* Create the default data RQ */
		ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
		rq->data = &hw->rq[hw->rq_count];
		rq->data_entry_size = hw->config.rq_default_buffer_size;

		/* NOTE(review): on this failure the header SLI queue stays
		 * allocated and rq_count stays incremented — verify teardown
		 * copes with the partially built pair. */
		if (sli_fc_rq_alloc(&hw->sli,
		    rq->data,
		    rq->entry_count,
		    rq->data_entry_size,
		    cq->queue, ulp, FALSE)) {
			ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
			ocs_free(hw->os, rq, sizeof(*rq));
			return NULL;
		}

		hw->hw_rq_lookup[hw->rq_count] = rq->instance;	/* Update hw_rq_lookup[] */
		hw->rq_count++;
		ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
			rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);

		/* Register the pair and link it under its parent CQ */
		hw->hw_rq[rq->instance] = rq;
		ocs_list_add_tail(&cq->q_list, rq);

		/* Per-entry lookup from RQ slot to posted sequence buffer */
		rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
					    rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
		if (rq->rq_tracker == NULL) {
			/* NOTE(review): rq is leaked here and remains
			 * registered in hw->hw_rq[] and on cq->q_list —
			 * looks like a leak/dangling registration; confirm. */
			ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
			return NULL;
		}
	}
	return rq;
}

/**
 * @brief Allocate a hw_rq_t object SET
 *
 * Allocate an RQ object SET, where each element in set
 * encapsulates 2 SLI queues (for rq pair)
 *
 * @param cqs pointers to be associated with RQs.
 * @param rqs RQ pointers to be returned on success.
 * @param num_rq_pairs number of rq pairs in the Set.
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return 0 in success and -1 on failure.
 */
uint32_t
hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
{
	ocs_hw_t *hw = cqs[0]->eq->hw;
	hw_rq_t *rq = NULL;
	sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
	uint32_t max_hw_rq, i, q_count;

	ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

	/* Initialise RQS pointers so the error path can tell what was built */
	for (i = 0; i < num_rq_pairs; i++) {
		rqs[i] = NULL;
	}

	/*
	 * First pass: allocate and register every pair's bookkeeping; the
	 * interleaved qs[] array (hdr, data, hdr, data, ...) is what the SLI
	 * set-allocation call consumes below.
	 */
	for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
		rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
		if (rq == NULL)
			goto error;

		rqs[i] = rq;
		rq->instance = hw->hw_rq_count++;
		rq->cq = cqs[i];
		rq->type = SLI_QTYPE_RQ;
		rq->ulp = ulp;
		rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

		/* Header RQ */
		rq->hdr = &hw->rq[hw->rq_count];
		rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count] = rq->hdr;

		/* Data RQ */
		rq->data = &hw->rq[hw->rq_count];
		rq->data_entry_size = hw->config.rq_default_buffer_size;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count + 1] = rq->data;

		rq->rq_tracker = NULL;
	}

	/* Single SLI call creates the whole RQ set against the base CQ */
	if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
			    cqs[0]->queue->id,
			    rqs[0]->entry_count,
			    rqs[0]->hdr_entry_size,
			    rqs[0]->data_entry_size,
			    ulp)) {
		ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
		goto error;
	}

	/* Second pass: publish the pairs and allocate their trackers */
	for (i = 0; i < num_rq_pairs; i++) {
		hw->hw_rq[rqs[i]->instance] = rqs[i];
		ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);

		rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
					    rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
		if (rqs[i]->rq_tracker == NULL) {
			ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
			goto error;
		}
	}

	return 0;

error:
	/*
	 * NOTE(review): this frees the rq objects but does not roll back
	 * hw->hw_rq_count / hw->rq_count, nor clear hw->hw_rq[] / cq q_list
	 * entries published in the second pass — confirm callers treat this
	 * failure as fatal for the whole HW context.
	 */
	for (i = 0; i < num_rq_pairs; i++) {
		if (rqs[i] != NULL) {
			if (rqs[i]->rq_tracker != NULL) {
				ocs_free(hw->os, rqs[i]->rq_tracker,
					 sizeof(ocs_hw_sequence_t*) * rqs[i]->entry_count);
			}
			ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
		}
	}

	return -1;
}

/**
 *
@brief Free an EQ object
 *
 * The EQ object and any child queue objects are freed
 *
 * @param eq pointer to EQ object
 *
 * @return none
 */
void
hw_del_eq(hw_eq_t *eq)
{
	if (eq != NULL) {
		hw_cq_t *cq;
		hw_cq_t *cq_next;

		/* Tear down every child CQ first (each frees its own children) */
		ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
			hw_del_cq(cq);
		}
		ocs_varray_free(eq->wq_array);
		ocs_list_remove(&eq->hw->eq_list, eq);
		eq->hw->hw_eq[eq->instance] = NULL;
		ocs_free(eq->hw->os, eq, sizeof(*eq));
	}
}

/**
 * @brief Free a CQ object
 *
 * The CQ object and any child queue objects are freed
 *
 * @param cq pointer to CQ object
 *
 * @return none
 */
void
hw_del_cq(hw_cq_t *cq)
{
	if (cq != NULL) {
		hw_q_t *q;
		hw_q_t *q_next;

		/* Dispatch each child to its type-specific destructor */
		ocs_list_foreach_safe(&cq->q_list, q, q_next) {
			switch(q->type) {
			case SLI_QTYPE_MQ:
				hw_del_mq((hw_mq_t*) q);
				break;
			case SLI_QTYPE_WQ:
				hw_del_wq((hw_wq_t*) q);
				break;
			case SLI_QTYPE_RQ:
				hw_del_rq((hw_rq_t*) q);
				break;
			default:
				break;
			}
		}
		ocs_list_remove(&cq->eq->cq_list, cq);
		cq->eq->hw->hw_cq[cq->instance] = NULL;
		ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
	}
}

/**
 * @brief Free a MQ object
 *
 * The MQ object is freed
 *
 * @param mq pointer to MQ object
 *
 * @return none
 */
void
hw_del_mq(hw_mq_t *mq)
{
	if (mq != NULL) {
		ocs_list_remove(&mq->cq->q_list, mq);
		mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
		ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
	}
}

/**
 * @brief Free a WQ object
 *
 * The WQ object is freed
 *
 * @param wq pointer to WQ object
 *
 * @return none
 */
void
hw_del_wq(hw_wq_t *wq)
{
	if (wq != NULL) {
		ocs_list_remove(&wq->cq->q_list, wq);
		wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
		ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
	}
}

/**
 * @brief Free an RQ object
 *
 * The RQ object is freed
 *
 * @param rq pointer to RQ object
 *
 * @return none
 */
void
hw_del_rq(hw_rq_t *rq)
{
	if (rq != NULL) {
		ocs_hw_t *hw = rq->cq->eq->hw;

		/* Free RQ tracker */
		if (rq->rq_tracker != NULL) {
			ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
			rq->rq_tracker = NULL;
		}
		ocs_list_remove(&rq->cq->q_list, rq);
		hw->hw_rq[rq->instance] = NULL;
		ocs_free(hw->os, rq, sizeof(*rq));
	}
}

/**
 * @brief Display HW queue objects
 *
 * The HW queue objects are displayed using ocs_log
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
void
hw_queue_dump(ocs_hw_t *hw)
{
	hw_eq_t *eq;
	hw_cq_t *cq;
	hw_q_t *q;
	hw_mq_t *mq;
	hw_wq_t *wq;
	hw_rq_t *rq;

	/* Walk the full EQ -> CQ -> {MQ,WQ,RQ} hierarchy */
	ocs_list_foreach(&hw->eq_list, eq) {
		ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
		ocs_list_foreach(&eq->cq_list, cq) {
			ocs_printf("  cq[%d] id %2d current\n", cq->instance, cq->queue->id);
			ocs_list_foreach(&cq->q_list, q) {
				switch(q->type) {
				case SLI_QTYPE_MQ:
					mq = (hw_mq_t *) q;
					ocs_printf("    mq[%d] id %2d\n", mq->instance, mq->queue->id);
					break;
				case SLI_QTYPE_WQ:
					wq = (hw_wq_t *) q;
					ocs_printf("    wq[%d] id %2d\n", wq->instance, wq->queue->id);
					break;
				case SLI_QTYPE_RQ:
					rq = (hw_rq_t *) q;
					ocs_printf("    rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
					break;
				default:
					break;
				}
			}
		}
	}
}

/**
 * @brief Teardown HW queue objects
 *
 * The HW queue objects are freed
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
void
hw_queue_teardown(ocs_hw_t *hw)
{
	uint32_t i;
	hw_eq_t *eq;
	hw_eq_t *eq_next;

	/* eq_list may never have been initialized on early init failure */
	if (ocs_list_valid(&hw->eq_list)) {
		ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
			hw_del_eq(eq);
		}
	}
	for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
		ocs_varray_free(hw->wq_cpu_array[i]);
		hw->wq_cpu_array[i] = NULL;
	}
	for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
		ocs_varray_free(hw->wq_class_array[i]);
		hw->wq_class_array[i] = NULL;
	}
}

/**
 * @brief Allocate a WQ to an IO object
 *
 * The next work queue index is used to assign a WQ to an IO.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
 * selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
 * the IO request came in on is selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
 * CPU the request is made on is selected.
 *
 * @param hw pointer to HW object
 * @param io pointer to IO object
 *
 * @return Return pointer to next WQ
 */
hw_wq_t *
ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	hw_eq_t *eq;
	hw_wq_t *wq = NULL;

	switch(io->wq_steering) {
	case OCS_HW_WQ_STEERING_CLASS:
		if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
			wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
		}
		break;
	case OCS_HW_WQ_STEERING_REQUEST:
		eq = io->eq;
		if (likely(eq != NULL)) {
			wq = ocs_varray_iter_next(eq->wq_array);
		}
		break;
	case OCS_HW_WQ_STEERING_CPU: {
		uint32_t cpuidx = ocs_thread_getcpu();

		if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
			wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
		}
		break;
	}
	}

	/* Steering produced nothing: fall back to the first WQ */
	if (unlikely(wq == NULL)) {
		wq = hw->hw_wq[0];
	}

	return wq;
}

/**
 * @brief Return count of EQs for a queue topology object
 *
 * The EQ count in the HW's queue topology (hw->qtop) object is returned
 *
 * @param hw pointer to HW object
 *
 * @return count of EQs
 */
uint32_t
ocs_hw_qtop_eq_count(ocs_hw_t *hw)
{
	return hw->qtop->entry_counts[QTOP_EQ];
}

/* Maximum length of one parsed topology token, including NUL */
#define TOKEN_LEN		32

/**
 * @brief return string given a QTOP entry
 *
 * @param entry QTOP entry
 *
 * @return returns string or "unknown"
 */
#if HW_QTOP_DEBUG
static char *
qtopentry2s(ocs_hw_qtop_entry_e entry)
{
	switch(entry) {
#define P(x)	case x: return #x;
	P(QTOP_EQ)
	P(QTOP_CQ)
	P(QTOP_WQ)
	P(QTOP_RQ)
	P(QTOP_MQ)
	P(QTOP_THREAD_START)
	P(QTOP_THREAD_END)
	P(QTOP_LAST)
#undef P
	}
	return "unknown";
}
#endif

/**
 * @brief Declare token types
 */
typedef enum {
	TOK_LPAREN = 1,
	TOK_RPAREN,
	TOK_COLON,
	TOK_EQUALS,
	TOK_QUEUE,
	TOK_ATTR_NAME,
	TOK_NUMBER,
	TOK_NUMBER_VALUE,
	TOK_NUMBER_LIST,
} tok_type_e;

/**
 * @brief Declare token sub-types
 */
typedef enum {
	TOK_SUB_EQ = 100,
	TOK_SUB_CQ,
	TOK_SUB_RQ,
	TOK_SUB_MQ,
	TOK_SUB_WQ,
	TOK_SUB_LEN,
	TOK_SUB_CLASS,
	TOK_SUB_ULP,
	TOK_SUB_FILTER,
} tok_subtype_e;

/**
 * @brief convert queue subtype to QTOP entry
 *
 * @param q queue subtype
 *
 * @return QTOP entry or 0
 */
static ocs_hw_qtop_entry_e
subtype2qtop(tok_subtype_e q)
{
	switch(q) {
	case TOK_SUB_EQ:	return QTOP_EQ;
	case TOK_SUB_CQ:	return QTOP_CQ;
	case TOK_SUB_RQ:	return QTOP_RQ;
	case TOK_SUB_MQ:	return QTOP_MQ;
	case TOK_SUB_WQ:	return QTOP_WQ;
	default:
		break;
	}
	return 0;
}

/**
 * @brief Declare token object
 */
typedef struct {
	tok_type_e type;	/* Token class (queue, attribute, number, ...) */
	tok_subtype_e subtype;	/* Queue or attribute sub-class, when applicable */
	char string[TOKEN_LEN];	/* NUL-terminated token text */
} tok_t;

/**
 * @brief Declare token array object
 */
typedef struct {
	tok_t *tokens;			/* Pointer to array of tokens */
	uint32_t alloc_count;		/* Number of tokens in the array */
	uint32_t inuse_count;		/* Number of tokens posted to array */
	uint32_t iter_idx;		/* Iterator index */
} tokarray_t;

/**
 * @brief Declare token match structure
 */
typedef struct {
	char *s;		/* Literal text to match */
	tok_type_e type;	/* Token type assigned on match */
	tok_subtype_e subtype;	/* Token sub-type assigned on match */
} tokmatch_t;

/**
 * @brief test if character is ID start character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID start character
 */
static int32_t
idstart(int c)
{
	return	isalpha(c) || (c == '_') || (c == '$');
}

/**
 * @brief test if character is an ID character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID character
 */
static int32_t
idchar(int c)
{
	return idstart(c) || ocs_isdigit(c);
}

/**
 * @brief Declare single character matches
 */
static tokmatch_t cmatches[] = {
	{"(", TOK_LPAREN},
	{")", TOK_RPAREN},
	{":", TOK_COLON},
	{"=", TOK_EQUALS},
};

/**
 * @brief Declare identifier match strings
 */
static tokmatch_t smatches[] = {
	{"eq", TOK_QUEUE, TOK_SUB_EQ},
	{"cq", TOK_QUEUE, TOK_SUB_CQ},
	{"rq", TOK_QUEUE, TOK_SUB_RQ},
	{"mq", TOK_QUEUE, TOK_SUB_MQ},
	{"wq", TOK_QUEUE, TOK_SUB_WQ},
	{"len", TOK_ATTR_NAME, TOK_SUB_LEN},
	{"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
	{"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
	{"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
};

/**
 * @brief Scan string and return next token
 *
 * The string is scanned and the next token is returned
 *
 * @param s input string to scan
 * @param tok pointer to place scanned token
 *
 * @return pointer to input string following scanned token,
or NULL */ static const char * tokenize(const char *s, tok_t *tok) { uint32_t i; memset(tok, 0, sizeof(*tok)); /* Skip over whitespace */ while (*s && ocs_isspace(*s)) { s++; } /* Return if nothing left in this string */ if (*s == 0) { return NULL; } /* Look for single character matches */ for (i = 0; i < ARRAY_SIZE(cmatches); i++) { if (cmatches[i].s[0] == *s) { tok->type = cmatches[i].type; tok->subtype = cmatches[i].subtype; tok->string[0] = *s++; return s; } } /* Scan for a hex number or decimal */ if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) { char *p = tok->string; tok->type = TOK_NUMBER; *p++ = *s++; *p++ = *s++; while ((*s == '.') || ocs_isxdigit(*s)) { if ((p - tok->string) < (int32_t)sizeof(tok->string)) { *p++ = *s; } if (*s == ',') { tok->type = TOK_NUMBER_LIST; } s++; } *p = 0; return s; } else if (ocs_isdigit(*s)) { char *p = tok->string; tok->type = TOK_NUMBER; while ((*s == ',') || ocs_isdigit(*s)) { if ((p - tok->string) < (int32_t)sizeof(tok->string)) { *p++ = *s; } if (*s == ',') { tok->type = TOK_NUMBER_LIST; } s++; } *p = 0; return s; } /* Scan for an ID */ if (idstart(*s)) { char *p = tok->string; for (*p++ = *s++; idchar(*s); s++) { if ((p - tok->string) < TOKEN_LEN) { *p++ = *s; } } /* See if this is a $ number value */ if (tok->string[0] == '$') { tok->type = TOK_NUMBER_VALUE; } else { /* Look for a string match */ for (i = 0; i < ARRAY_SIZE(smatches); i++) { if (strcmp(smatches[i].s, tok->string) == 0) { tok->type = smatches[i].type; tok->subtype = smatches[i].subtype; return s; } } } } return s; } /** * @brief convert token type to string * * @param type token type * * @return string, or "unknown" */ static const char * token_type2s(tok_type_e type) { switch(type) { #define P(x) case x: return #x; P(TOK_LPAREN) P(TOK_RPAREN) P(TOK_COLON) P(TOK_EQUALS) P(TOK_QUEUE) P(TOK_ATTR_NAME) P(TOK_NUMBER) P(TOK_NUMBER_VALUE) P(TOK_NUMBER_LIST) #undef P } return "unknown"; } /** * @brief convert token sub-type to string * * @param subtype 
token sub-type
 *
 * @return string, or an empty string when the sub-type is unknown
 */
static const char *
token_subtype2s(tok_subtype_e subtype)
{
	switch(subtype) {
#define P(x)	case x: return #x;
	P(TOK_SUB_EQ)
	P(TOK_SUB_CQ)
	P(TOK_SUB_RQ)
	P(TOK_SUB_MQ)
	P(TOK_SUB_WQ)
	P(TOK_SUB_LEN)
	P(TOK_SUB_CLASS)
	P(TOK_SUB_ULP)
	P(TOK_SUB_FILTER)
#undef P
	}
	/* NB: unlike token_type2s(), unmatched sub-types map to "" */
	return "";
}

/**
 * @brief Generate syntax error message
 *
 * A syntax error message is found, the input tokens are dumped up to and including
 * the token that failed as indicated by the current iterator index.
 *
 * @param hw pointer to HW object
 * @param tokarray pointer to token array object
 *
 * @return none
 */
static void
tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
{
	uint32_t i;
	tok_t *tok;

	ocs_log_test(hw->os, "Syntax error:\n");

	/* ">>>" marks the token at which parsing failed */
	for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
		ocs_log_test(hw->os, "%s [%2d]    %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : " ", i,
			token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
	}
}

/**
 * @brief parse a number
 *
 * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
 *
 * @param hw pointer to HW object
 * @param qtop pointer to QTOP object
 * @param tok pointer to token to parse
 *
 * @return numeric value
 */
static uint32_t
tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
{
	uint32_t rval = 0;
	uint32_t num_cpus = ocs_get_num_cpus();

	switch(tok->type) {
	case TOK_NUMBER_VALUE:
		/* Symbolic $ values resolve against HW/CPU configuration */
		if (ocs_strcmp(tok->string, "$ncpu") == 0) {
			rval = num_cpus;
		} else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
			rval = num_cpus - 1;
		} else if (ocs_strcmp(tok->string, "$nwq") == 0) {
			if (hw != NULL) {
				rval = hw->config.n_wq;
			}
		} else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
			rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
		} else if (ocs_strcmp(tok->string, "$nulp") == 0) {
			rval = hw->ulp_max - hw->ulp_start + 1;
		/* $rptN yields the Nth-innermost enclosing repeat counter */
		} else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-1];
		} else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-2];
		} else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-3];
		} else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx-4];
		} else {
			rval = ocs_strtoul(tok->string, 0, 0);
		}
		break;
	case TOK_NUMBER:
		rval = ocs_strtoul(tok->string, 0, 0);
		break;
	default:
		break;
	}
	return rval;
}

/**
 * @brief parse an array of tokens
 *
 * The tokens are semantically parsed, to generate QTOP entries.
 * Recurses to expand "N ( ... )" repeat groups.
 *
 * @param hw pointer to HW object
 * @param tokarray array array of tokens
 * @param qtop ouptut QTOP object
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
static int32_t
parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
{
	ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
	tok_t *tok;

	for (; (tokarray->iter_idx < tokarray->inuse_count) &&
	     ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
		if (qtop->inuse_count >= qtop->alloc_count) {
			return -1;
		}

		qt = qtop->entries + qtop->inuse_count;

		switch (tok[0].type)
		{
		case TOK_QUEUE:
			qt->entry = subtype2qtop(tok[0].subtype);
			qt->set_default = FALSE;
			qt->len = 0;
			qt->class = 0;
			qtop->inuse_count++;

			tokarray->iter_idx++;		/* Advance current token index */

			/* Parse for queue attributes, possibly multiple instances */
			/* Each attribute is a 4-token run: ':' name '=' value */
			while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
				tok = &tokarray->tokens[tokarray->iter_idx];
				if( (tok[0].type == TOK_COLON) &&
				    (tok[1].type == TOK_ATTR_NAME) &&
				    (tok[2].type == TOK_EQUALS) &&
				    ((tok[3].type == TOK_NUMBER) ||
				     (tok[3].type == TOK_NUMBER_VALUE) ||
				     (tok[3].type == TOK_NUMBER_LIST))) {
					switch (tok[1].subtype) {
					case TOK_SUB_LEN:
						qt->len = tok_getnumber(hw, qtop, &tok[3]);
						break;

					case TOK_SUB_CLASS:
						qt->class = tok_getnumber(hw, qtop, &tok[3]);
						break;

					case TOK_SUB_ULP:
						qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
						break;

					case TOK_SUB_FILTER:
						/* A comma separated list ORs each bit into the mask */
						if (tok[3].type == TOK_NUMBER_LIST) {
							uint32_t mask = 0;
							char *p = tok[3].string;

							while ((p != NULL) && *p) {
								uint32_t v;

								v = ocs_strtoul(p, 0, 0);
								if (v < 32) {
									mask |= (1U << v);
								}

								p = ocs_strchr(p, ',');
								if (p != NULL) {
									p++;
								}
							}
							qt->filter_mask = mask;
						} else {
							qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
						}
						break;
					default:
						break;
					}
					/* Advance current token index */
					tokarray->iter_idx += 4;
				} else {
					break;
				}
			}
			qtop->entry_counts[qt->entry]++;
			break;

		case TOK_ATTR_NAME:
			/* Default form is a 5-token run: name ':' queue '=' value */
			if ( ((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
			    (tok[1].type == TOK_COLON) &&
			    (tok[2].type == TOK_QUEUE) &&
			    (tok[3].type == TOK_EQUALS) &&
			    ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
				qt->entry = subtype2qtop(tok[2].subtype);
				qt->set_default = TRUE;
				switch(tok[0].subtype) {
				case TOK_SUB_LEN:
					qt->len = tok_getnumber(hw, qtop, &tok[4]);
					break;
				case TOK_SUB_CLASS:
					qt->class = tok_getnumber(hw, qtop, &tok[4]);
					break;
				case TOK_SUB_ULP:
					qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
					break;
				default:
					break;
				}
				qtop->inuse_count++;
				tokarray->iter_idx += 5;
			} else {
				tok_syntax(hw, tokarray);
				return -1;
			}
			break;

		case TOK_NUMBER:
		case TOK_NUMBER_VALUE: {
			/* "N ( ... )" — re-parse the group body N times */
			uint32_t rpt_count = 1;
			uint32_t i;

			rpt_count = tok_getnumber(hw, qtop, tok);

			if (tok[1].type == TOK_LPAREN) {
				uint32_t iter_idx_save;

				tokarray->iter_idx += 2;

				/* save token array iteration index */
				iter_idx_save = tokarray->iter_idx;

				for (i = 0; i < rpt_count; i++) {
					uint32_t rptcount_idx = qtop->rptcount_idx;

					/* Expose the loop counter to $rptN inside the group */
					if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
						qtop->rptcount[qtop->rptcount_idx++] = i;
					}

					/* restore token array iteration index */
					tokarray->iter_idx = iter_idx_save;

					/* parse, append to qtop */
					parse_topology(hw, tokarray, qtop);

					qtop->rptcount_idx = rptcount_idx;
				}
			}
			break;
		}

		case TOK_RPAREN:
			/* End of a repeat group: return to the caller's loop */
			tokarray->iter_idx++;
			return 0;

		default:
			tok_syntax(hw, tokarray);
			return -1;
		}
	}
	return 0;
}

/**
 * @brief Parse queue topology string
 *
 * The queue topology object is allocated, and
filled with the results of parsing the
 * passed in queue topology string
 *
 * @param hw pointer to HW object
 * @param qtop_string input queue topology string
 *
 * @return pointer to allocated QTOP object, or NULL if there was an error
 */
ocs_hw_qtop_t *
ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
{
	ocs_hw_qtop_t *qtop;
	tokarray_t tokarray;
	const char *s;
#if HW_QTOP_DEBUG
	uint32_t i;
	ocs_hw_qtop_entry_t *qt;
#endif

	ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);

	/* Allocate a token array */
	tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
	if (tokarray.tokens == NULL) {
		return NULL;
	}
	tokarray.alloc_count = MAX_TOKENS;
	tokarray.inuse_count = 0;
	tokarray.iter_idx = 0;

	/* Parse the tokens */
	/* tokenize() returns NULL when the input string is exhausted */
	for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
	     ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count]))) != NULL; ) {
		tokarray.inuse_count++;
	}

	/* Allocate a queue topology structure */
	qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
	if (qtop == NULL) {
		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
		ocs_log_err(hw->os, "malloc qtop failed\n");
		return NULL;
	}
	qtop->os = hw->os;

	/* Allocate queue topology entries */
	qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
	if (qtop->entries == NULL) {
		ocs_log_err(hw->os, "malloc qtop entries failed\n");
		ocs_free(hw->os, qtop, sizeof(*qtop));
		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
		return NULL;
	}
	qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
	qtop->inuse_count = 0;

	/* Parse the tokens */
	/* NOTE(review): parse_topology()'s return value is ignored here, so a
	 * syntax error still yields a (possibly partial) qtop — confirm intended. */
	parse_topology(hw, &tokarray, qtop);

#if HW_QTOP_DEBUG
	for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
		ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry),
			qt->set_default, qt->len, qt->class, qt->ulp);
	}
#endif

	/* Free the tokens array */
	ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));

	return qtop;
}

/**
 * @brief free queue topology object
 *
 * @param qtop pointer to QTOP object
 *
 * @return none
 */
void
ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
{
	if (qtop != NULL) {
		if (qtop->entries != NULL) {
			ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
		}
		ocs_free(qtop->os, qtop, sizeof(*qtop));
	}
}

/* Uncomment this to turn on RQ debug */
// #define ENABLE_DEBUG_RQBUF

static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);

/**
 * @brief Process receive queue completions for RQ Pair mode.
 *
 * @par Description
 * RQ completions are processed. In RQ pair mode, a single header and single payload
 * buffer are received, and passed to the function that has registered for unsolicited
 * callbacks.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
int32_t
ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
{
	uint16_t rq_id;
	uint32_t index;
	int32_t rqindex;
	int32_t	 rq_status;
	uint32_t h_len;
	uint32_t p_len;
	ocs_hw_sequence_t *seq;

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
	if (0 != rq_status) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = ocs_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
					     rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = ocs_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
				ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
					     rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/* since RQ buffers were not consumed, cannot return them to chip */
			ocs_log_debug(hw->os, "Warning: RCQE status=%#x, \n", rq_status);
			/* fall through */
		default:
			break;
		}
		return -1;
	}

	rqindex = ocs_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
		return -1;
	}

	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
		 rq->payload_use_count++;})

	seq = ocs_hw_rqpair_get(hw, rqindex, index);
	ocs_hw_assert(seq != NULL);

	/* Plain RQ pair completion: no auto xfer rdy state */
	seq->hw = hw;
	seq->auto_xrdy = 0;
	seq->out_of_xris = 0;
	seq->xri = 0;
	seq->hio = NULL;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
		}
	} else {
		hw->callback.unsolicited(hw->args.unsolicited, seq);
	}

	return 0;
}

/**
 * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
 *
 * @par Description
 * RQ completions are processed. In RQ pair mode, a single header and single payload
 * buffer are received, and passed to the function that has registered for unsolicited
 * callbacks.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
int32_t
ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
{
	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
	sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
	uint16_t rq_id;
	uint32_t index;
	int32_t rqindex;
	int32_t	 rq_status;
	uint32_t h_len;
	uint32_t p_len;
	ocs_hw_sequence_t *seq;
	uint8_t axr_lock_taken = 0;
#if defined(OCS_DISC_SPIN_DELAY)
	uint32_t 	delay = 0;
	char 		prop_buf[32];
#endif

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
	if (0 != rq_status) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = ocs_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
					    rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = ocs_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
				ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
					    rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/* since RQ buffers were not consumed, cannot return them to chip */
			ocs_log_debug(hw->os, "Warning: RCQE status=%#x, \n", rq_status);
			/* fall through */
		default:
			break;
		}
		return -1;
	}

	rqindex = ocs_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
		return -1;
	}

	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
		 rq->payload_use_count++;})

	seq = ocs_hw_rqpair_get(hw, rqindex, index);
	ocs_hw_assert(seq != NULL);

	/* Carry the auto xfer rdy state decoded from the optimized-write CQE */
	seq->hw = hw;
	seq->auto_xrdy = opt_wr->agxr;
	seq->out_of_xris = opt_wr->oox;
	seq->xri = opt_wr->xri;
	seq->hio = NULL;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	if (seq->auto_xrdy) {
		fc_header_t *fc_hdr = seq->header->dma.virt;

		/* NOTE(review): ocs_hw_io_lookup() result is dereferenced
		 * without a NULL check — confirm an XRI from the CQE always
		 * resolves to an IO here. */
		seq->hio = ocs_hw_io_lookup(hw, seq->xri);
		ocs_lock(&seq->hio->axr_lock);
		axr_lock_taken = 1;

		/* save the FCFI, src_id, dest_id and ox_id because we need it for the sequence object when the data comes. */
		seq->hio->axr_buf->fcfi = seq->fcfi;
		seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
		seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
		seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
		seq->hio->axr_buf->cmd_cqe = 1;

		/*
		 * Since auto xfer rdy is used for this IO, then clear the sequence
		 * initiative bit in the header so that the upper layers wait for the
		 * data. This should flow exactly like the first burst case.
		 */
		fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);

		/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
		if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
			seq->hio->axr_buf->call_axr_cmd = 1;
			seq->hio->axr_buf->cmd_seq = seq;
			goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
		}
	}

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
		}
	} else {
		hw->callback.unsolicited(hw->args.unsolicited, seq);
	}

	if (seq->auto_xrdy) {
		/* If data cqe came before cmd cqe in out of order in case of AXR */
		if(seq->hio->axr_buf->data_cqe == 1) {

#if defined(OCS_DISC_SPIN_DELAY)
			if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
				delay = ocs_strtoul(prop_buf, 0, 0);
				ocs_udelay(delay);
			}
#endif
			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
			if (hw->config.bounce) {
				fc_header_t *hdr = seq->header->dma.virt;
				uint32_t s_id = fc_be24toh(hdr->s_id);
				uint32_t d_id = fc_be24toh(hdr->d_id);
				uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
				if (hw->callback.bounce != NULL) {
					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
				}
			} else {
				hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
			}
		}
	}

exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
	if(axr_lock_taken) {
		ocs_unlock(&seq->hio->axr_lock);
	}
	return 0;
}

/**
 * @brief Process CQ completions for Auto xfer rdy data phases.
 *
 * @par Description
 * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
 * being assigned to an IO. When the completion is received, All of the data
 * is in the single buffer.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
int32_t
ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
{
	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
	sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
	ocs_hw_sequence_t *seq;
	ocs_hw_io_t *io;
	ocs_hw_auto_xfer_rdy_buffer_t *buf;
#if defined(OCS_DISC_SPIN_DELAY)
	uint32_t 	delay = 0;
	char 		prop_buf[32];
#endif
	/* Look up the IO */
	/* NOTE(review): lookup result is dereferenced without a NULL check —
	 * confirm the XRI in a data CQE always maps to a live IO. */
	io = ocs_hw_io_lookup(hw, opt_wr->xri);
	ocs_lock(&io->axr_lock);
	buf = io->axr_buf;
	buf->data_cqe = 1;
	seq = &buf->seq;

	/* Build the sequence from the pre-posted auto xfer rdy buffer */
	seq->hw = hw;
	seq->auto_xrdy = 1;
	seq->out_of_xris = 0;
	seq->xri = opt_wr->xri;
	seq->hio = io;
	seq->header = &buf->header;
	seq->payload = &buf->payload;

	seq->header->dma.len = sizeof(fc_header_t);
	seq->payload->dma.len = opt_wr->total_data_placed;
	seq->fcfi = buf->fcfi;
	seq->hw_priv = cq->eq;

	if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
		seq->status = OCS_HW_UNSOL_SUCCESS;
	} else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
		seq->status = OCS_HW_UNSOL_ABTS_RCVD;
	} else {
		seq->status = OCS_HW_UNSOL_ERROR;
	}

	/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
	if(io->type == OCS_HW_IO_TARGET_RSP) {
		io->axr_buf->call_axr_data = 1;
		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
	}

	if(!buf->cmd_cqe) {
		/* if data cqe came before cmd cqe, return here, cmd cqe will handle */
		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
	}

#if defined(OCS_DISC_SPIN_DELAY)
	if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
		delay = ocs_strtoul(prop_buf, 0, 0);
		ocs_udelay(delay);
	}
#endif

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
		}
	} else {
		hw->callback.unsolicited(hw->args.unsolicited, seq);
	}

exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
	ocs_unlock(&io->axr_lock);
	return 0;
}

/**
 * @brief Return pointer to RQ buffer entry.
 *
 * @par Description
 * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
 *
 * @param hw Hardware context.
 * @param rqindex Index of the RQ that is being processed.
 * @param bufindex Index into the RQ that is being processed.
 *
 * @return Pointer to the sequence structure, or NULL otherwise.
 */
static ocs_hw_sequence_t *
ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
{
	sli4_queue_t *rq_hdr = &hw->rq[rqindex];
	sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
	ocs_hw_sequence_t *seq = NULL;
	hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];

#if defined(ENABLE_DEBUG_RQBUF)
	uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
#endif

	if (bufindex >= rq_hdr->length) {
		ocs_log_err(hw->os, "RQ index %d bufindex %d exceed ring length %d for id %d\n",
			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);
		return NULL;
	}

	/* Both rings share one doorbell; lock both while consuming the slot */
	sli_queue_lock(rq_hdr);
	sli_queue_lock(rq_payload);

#if defined(ENABLE_DEBUG_RQBUF)
	/* Put a debug value into the rq, to track which entries are still valid */
	_sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
	_sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
#endif

	/* Detach the posted sequence from the tracker slot */
	seq = rq->rq_tracker[bufindex];
	rq->rq_tracker[bufindex] = NULL;

	if (seq == NULL ) {
		ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
			    rqindex, bufindex, rq_hdr->index);
	}

	sli_queue_unlock(rq_payload);
	sli_queue_unlock(rq_hdr);
	return seq;
}

/**
 * @brief Posts an RQ buffer to a queue and update the verification structures
 *
 * @param hw		hardware context
 * @param seq Pointer to sequence object.
 *
 * @return Returns 0 on success, or a non-zero value otherwise.
 */
static int32_t
ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
{
	sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
	sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
	uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
	hw_rq_t *rq = hw->hw_rq[hw_rq_index];
	uint32_t phys_hdr[2];
	uint32_t phys_payload[2];
	int32_t qindex_hdr;
	int32_t qindex_payload;

	/*
	 * Update the RQ verification lookup tables.
	 * Split each 64-bit DMA address into the hi/lo 32-bit words the
	 * queue entry format expects.
	 */
	phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
	phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
	phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
	phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);

	/* Lock order (header, then payload) matches ocs_hw_rqpair_get() */
	sli_queue_lock(rq_hdr);
	sli_queue_lock(rq_payload);

	/*
	 * Note: The header must be posted last for buffer pair mode because
	 * posting on the header queue posts the payload queue as well.
	 * We do not ring the payload queue independently in RQ pair mode.
	 */
	qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
	qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
	if (qindex_hdr < 0 ||
	    qindex_payload < 0) {
		ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
		sli_queue_unlock(rq_payload);
		sli_queue_unlock(rq_hdr);
		return OCS_HW_RTN_ERROR;
	}

	/* ensure the indexes are the same */
	ocs_hw_assert(qindex_hdr == qindex_payload);

	/*
	 * Update the lookup table so the completion path
	 * (ocs_hw_rqpair_get()) can recover this sequence by queue index.
	 */
	if (rq->rq_tracker[qindex_hdr] == NULL) {
		rq->rq_tracker[qindex_hdr] = seq;
	} else {
		ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
			     hw_rq_index, qindex_hdr);
	}

	sli_queue_unlock(rq_payload);
	sli_queue_unlock(rq_hdr);
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Return RQ buffers (while in RQ pair mode).
 *
 * @par Description
 * The header and payload buffers are returned to the Receive Queue.
 *
 * @param hw Hardware context.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
*/ ocs_hw_rtn_e ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq) { ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS; /* Check for auto xfer rdy dummy buffers and call the proper release function. */ if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) { return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq); } /* * Post the data buffer first. Because in RQ pair mode, ringing the * doorbell of the header ring will post the data buffer as well. */ if (ocs_hw_rqpair_put(hw, seq)) { ocs_log_err(hw->os, "error writing buffers\n"); return OCS_HW_RTN_ERROR; } return rc; } /** * @brief Find the RQ index of RQ_ID. * * @param hw Hardware context. * @param rq_id RQ ID to find. * * @return Returns the RQ index, or -1 if not found */ static inline int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id) { return ocs_hw_queue_hash_find(hw->rq_hash, rq_id); } /** * @ingroup devInitShutdown * @brief Allocate auto xfer rdy buffers. * * @par Description * Allocates the auto xfer rdy buffers and places them on the free list. * * @param hw Hardware context allocated by the caller. * @param num_buffers Number of buffers to allocate. * * @return Returns 0 on success, or a non-zero value on failure. 
*/ ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers) { ocs_hw_auto_xfer_rdy_buffer_t *buf; uint32_t i; hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE); if (hw->auto_xfer_rdy_buf_pool == NULL) { ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n"); return OCS_HW_RTN_NO_MEMORY; } for (i = 0; i < num_buffers; i++) { /* allocate the wrapper object */ buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i); ocs_hw_assert(buf != NULL); /* allocate the auto xfer ready buffer */ if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) { ocs_log_err(hw->os, "DMA allocation failed\n"); ocs_free(hw->os, buf, sizeof(*buf)); return OCS_HW_RTN_NO_MEMORY; } /* build a fake data header in big endian */ buf->hdr.info = FC_RCTL_INFO_SOL_DATA; buf->hdr.r_ctl = FC_RCTL_FC4_DATA; buf->hdr.type = FC_TYPE_FCP; buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_FIRST_SEQUENCE | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE | FC_FCTL_SEQUENCE_INITIATIVE); /* build the fake header DMA object */ buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR; buf->header.dma.virt = &buf->hdr; buf->header.dma.alloc = buf; buf->header.dma.size = sizeof(buf->hdr); buf->header.dma.len = sizeof(buf->hdr); buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA; } return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX. * * @par Description * When new buffers are freed, check existing XRIs waiting for buffers. * * @param hw Hardware context allocated by the caller. 
 */
static void
ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
{
	ocs_hw_io_t *io;
	int32_t rc;

	ocs_lock(&hw->io_lock);

	/* Re-queue XRIs parked on the DNRX list; stop on the first failure */
	while (!ocs_list_empty(&hw->io_port_dnrx)) {
		io = ocs_list_remove_head(&hw->io_port_dnrx);
		rc = ocs_hw_reque_xri(hw, io);
		if(rc) {
			break;
		}
	}

	ocs_unlock(&hw->io_lock);
}

/**
 * @brief Called when the POST_SGL_PAGE command completes.
 *
 * @par Description
 * Free the mailbox command buffer.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 *
 * @return Returns 0.
 */
static int32_t
ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	if (status != 0) {
		ocs_log_debug(hw->os, "Status 0x%x\n", status);
	}
	/* The mailbox buffer was allocated by move_to_port(); release it here */
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

/**
 * @brief Prepares an XRI to move to the chip.
 *
 * @par Description
 * Puts the data SGL into the SGL list for the IO object and possibly registers
 * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
 * mailbox commands, we don't need to wait for completion before preceding.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 *
 * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
 */
ocs_hw_rtn_e
ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	/* We only need to preregister the SGL if it has not yet been done. */
	if (!sli_get_sgl_preregister(&hw->sli)) {
		uint8_t *post_sgl;
		ocs_dma_t *psgls = &io->def_sgl;
		ocs_dma_t **sgls = &psgls;

		/* non-local buffer required for mailbox queue */
		post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		if (post_sgl == NULL) {
			ocs_log_err(hw->os, "no buffer for command\n");
			return OCS_HW_RTN_NO_MEMORY;
		}
		/*
		 * On success, ownership of post_sgl passes to ocs_hw_command();
		 * the completion callback frees it.
		 * NOTE(review): if sli_cmd_fcoe_post_sgl_pages() fails to
		 * format the command (outer if false), post_sgl appears to be
		 * leaked and no error is reported - verify intended behavior.
		 */
		if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
						io->indicator, 1, sgls, NULL, NULL)) {
			if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
					   ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
				ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
				ocs_log_err(hw->os, "SGL post failed\n");
				return OCS_HW_RTN_ERROR;
			}
		}
	}

	/* Attach a data buffer to the XRI; io_lock is required by buffer_post() */
	ocs_lock(&hw->io_lock);
	if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
		ocs_unlock(&hw->io_lock);
		return OCS_HW_RTN_ERROR;
	}
	ocs_unlock(&hw->io_lock);
	return OCS_HW_RTN_SUCCESS;
}

/**
 * @brief Prepares an XRI to move back to the host.
 *
 * @par Description
 * Releases any attached buffer back to the pool.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 */
void
ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
{
	if (io->axr_buf != NULL) {
		ocs_lock(&hw->io_lock);
		/* check list and remove if there */
		if (ocs_list_on_list(&io->dnrx_link)) {
			ocs_list_remove(&hw->io_port_dnrx, io);
			io->auto_xfer_rdy_dnrx = 0;

			/* release the count for waiting for a buffer */
			ocs_hw_io_free(hw, io);
		}

		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
		io->axr_buf = NULL;
		ocs_unlock(&hw->io_lock);

		/* The returned buffer may satisfy an XRI waiting with DNRX set */
		ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
	}
	return;
}

/**
 * @brief Posts an auto xfer rdy buffer to an IO.
 *
 * @par Description
 * Puts the data SGL into the SGL list for the IO object
 * @n @name
 * @b Note: io_lock must be held.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 *
 * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
*/ uint8_t ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf) { ocs_hw_auto_xfer_rdy_buffer_t *buf; sli4_sge_t *data; if(!reuse_buf) { buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool); io->axr_buf = buf; } data = io->def_sgl.virt; data[0].sge_type = SLI4_SGE_TYPE_SKIP; data[0].last = 0; /* * Note: if we are doing DIF assists, then the SGE[1] must contain the * DI_SEED SGE. The host is responsible for programming: * SGE Type (Word 2, bits 30:27) * Replacement App Tag (Word 2 bits 15:0) * App Tag (Word 3 bits 15:0) * New Ref Tag (Word 3 bit 23) * Metadata Enable (Word 3 bit 20) * Auto-Increment RefTag (Word 3 bit 19) * Block Size (Word 3 bits 18:16) * The following fields are managed by the SLI Port: * Ref Tag Compare (Word 0) * Replacement Ref Tag (Word 1) - In not the LBA * NA (Word 2 bit 25) * Opcode RX (Word 3 bits 27:24) * Checksum Enable (Word 3 bit 22) * RefTag Enable (Word 3 bit 21) * * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume eveything is cleared. */ if (hw->config.auto_xfer_rdy_p_type) { sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1]; diseed->sge_type = SLI4_SGE_TYPE_DISEED; diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value; diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value; diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid; diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */ diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip; } else { data[1].sge_type = SLI4_SGE_TYPE_SKIP; data[1].last = 0; } data[2].sge_type = SLI4_SGE_TYPE_DATA; data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys); data[2].buffer_address_low = ocs_addr32_lo(io->axr_buf->payload.dma.phys); data[2].buffer_length = io->axr_buf->payload.dma.size; data[2].last = TRUE; data[3].sge_type = SLI4_SGE_TYPE_SKIP; return 0; } /** * @brief Return auto xfer ready buffers (while in RQ pair mode). 
* * @par Description * The header and payload buffers are returned to the auto xfer rdy pool. * * @param hw Hardware context. * @param seq Header/payload sequence buffers. * * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure. */ static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq) { ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc; buf->data_cqe = 0; buf->cmd_cqe = 0; buf->fcfi = 0; buf->call_axr_cmd = 0; buf->call_axr_data = 0; /* build a fake data header in big endian */ buf->hdr.info = FC_RCTL_INFO_SOL_DATA; buf->hdr.r_ctl = FC_RCTL_FC4_DATA; buf->hdr.type = FC_TYPE_FCP; buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER | FC_FCTL_FIRST_SEQUENCE | FC_FCTL_LAST_SEQUENCE | FC_FCTL_END_SEQUENCE | FC_FCTL_SEQUENCE_INITIATIVE); /* build the fake header DMA object */ buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR; buf->header.dma.virt = &buf->hdr; buf->header.dma.alloc = buf; buf->header.dma.size = sizeof(buf->hdr); buf->header.dma.len = sizeof(buf->hdr); buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA; ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw); return OCS_HW_RTN_SUCCESS; } /** * @ingroup devInitShutdown * @brief Free auto xfer rdy buffers. * * @par Description * Frees the auto xfer rdy buffers. * * @param hw Hardware context allocated by the caller. * * @return Returns 0 on success, or a non-zero value on failure. 
 */
static void
ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
{
	ocs_hw_auto_xfer_rdy_buffer_t *buf;
	uint32_t i;

	if (hw->auto_xfer_rdy_buf_pool != NULL) {
		ocs_lock(&hw->io_lock);
		/* Release each buffer's DMA memory before dropping the pool */
		for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
			buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
			if (buf != NULL) {
				ocs_dma_free(hw->os, &buf->payload.dma);
			}
		}
		ocs_unlock(&hw->io_lock);

		ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
		hw->auto_xfer_rdy_buf_pool = NULL;
	}
}

/**
 * @ingroup devInitShutdown
 * @brief Configure the rq_pair function from ocs_hw_init().
 *
 * @par Description
 * Allocates the buffers to auto xfer rdy and posts initial XRIs for this feature.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rtn_e
ocs_hw_rqpair_init(ocs_hw_t *hw)
{
	ocs_hw_rtn_e rc;
	uint32_t xris_posted;

	ocs_log_debug(hw->os, "RQ Pair mode\n");

	/*
	 * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
	 * return with an error. So allocate the buffers based on the initial XRI pool required to support this
	 * feature.
	 */
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->auto_xfer_rdy_buf_pool == NULL) {
			/*
			 * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
			 * one to post back for the case where the response phase is started in the context of
			 * the data completion.
			 */
			rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
			if (rc != OCS_HW_RTN_SUCCESS) {
				return rc;
			}
		} else {
			/* Pool already exists (re-init path): just recycle it */
			ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
		}

		/* Post the auto XFR_RDY XRIs */
		xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
		if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
			ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
			return OCS_HW_RTN_ERROR;
		}
	}

	/* NOTE(review): returns literal 0 where the type is ocs_hw_rtn_e -
	 * presumably 0 == OCS_HW_RTN_SUCCESS; confirm against the enum. */
	return 0;
}

/**
 * @ingroup devInitShutdown
 * @brief Tear down the rq_pair function from ocs_hw_teardown().
 *
 * @par Description
 * Frees the buffers to auto xfer rdy.
 *
 * @param hw Hardware context allocated by the caller.
 */
void
ocs_hw_rqpair_teardown(ocs_hw_t *hw)
{
	/* We need to free any auto xfer ready buffers */
	ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
}
Index: head/sys/dev/ocs_fc/ocs_io.c
===================================================================
--- head/sys/dev/ocs_fc/ocs_io.c	(revision 359440)
+++ head/sys/dev/ocs_fc/ocs_io.c	(revision 359441)
@@ -1,491 +1,491 @@
/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Provide IO object allocation. */ /*! * @defgroup io_alloc IO allocation */ #include "ocs.h" #include "ocs_scsi.h" #include "ocs_els.h" #include "ocs_utils.h" void ocs_mgmt_io_list(ocs_textbuf_t *textbuf, void *io); void ocs_mgmt_io_get_all(ocs_textbuf_t *textbuf, void *io); int ocs_mgmt_io_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *io); static ocs_mgmt_functions_t io_mgmt_functions = { .get_list_handler = ocs_mgmt_io_list, .get_handler = ocs_mgmt_io_get, .get_all_handler = ocs_mgmt_io_get_all, }; /** * @brief IO pool. * * Structure encapsulating a pool of IO objects. * */ struct ocs_io_pool_s { ocs_t *ocs; /* Pointer to device object */ ocs_lock_t lock; /* IO pool lock */ uint32_t io_num_ios; /* Total IOs allocated */ ocs_pool_t *pool; }; /** * @brief Create a pool of IO objects. * * @par Description * This function allocates memory in larger chucks called * "slabs" which are a fixed size. It calculates the number of IO objects that * fit within each "slab" and determines the number of "slabs" required to * allocate the number of IOs requested. 
Each of the slabs is allocated and * then it grabs each IO object within the slab and adds it to the free list. * Individual command, response and SGL DMA buffers are allocated for each IO. * * "Slabs" * +----------------+ * | | * +----------------+ | * | IO | | * +----------------+ | * | ... | | * +----------------+__+ * | IO | * +----------------+ * * @param ocs Driver instance's software context. * @param num_io Number of IO contexts to allocate. * @param num_sgl Number of SGL entries to allocate for each IO. * * @return Returns a pointer to a new ocs_io_pool_t on success, * or NULL on failure. */ ocs_io_pool_t * ocs_io_pool_create(ocs_t *ocs, uint32_t num_io, uint32_t num_sgl) { uint32_t i = 0; int32_t rc = -1; ocs_io_pool_t *io_pool; /* Allocate the IO pool */ io_pool = ocs_malloc(ocs, sizeof(*io_pool), OCS_M_ZERO | OCS_M_NOWAIT); if (io_pool == NULL) { ocs_log_err(ocs, "allocate of IO pool failed\n"); - return NULL;; + return NULL; } io_pool->ocs = ocs; io_pool->io_num_ios = num_io; /* initialize IO pool lock */ ocs_lock_init(ocs, &io_pool->lock, "io_pool lock[%d]", ocs->instance_index); io_pool->pool = ocs_pool_alloc(ocs, sizeof(ocs_io_t), io_pool->io_num_ios, FALSE); for (i = 0; i < io_pool->io_num_ios; i++) { ocs_io_t *io = ocs_pool_get_instance(io_pool->pool, i); io->tag = i; io->instance_index = i; io->ocs = ocs; /* allocate a command/response dma buffer */ if (ocs->enable_ini) { rc = ocs_dma_alloc(ocs, &io->cmdbuf, SCSI_CMD_BUF_LENGTH, OCS_MIN_DMA_ALIGNMENT); if (rc) { ocs_log_err(ocs, "ocs_dma_alloc cmdbuf failed\n"); ocs_io_pool_free(io_pool); return NULL; } } /* Allocate a response buffer */ rc = ocs_dma_alloc(ocs, &io->rspbuf, SCSI_RSP_BUF_LENGTH, OCS_MIN_DMA_ALIGNMENT); if (rc) { ocs_log_err(ocs, "ocs_dma_alloc cmdbuf failed\n"); ocs_io_pool_free(io_pool); return NULL; } /* Allocate SGL */ io->sgl = ocs_malloc(ocs, sizeof(*io->sgl) * num_sgl, OCS_M_NOWAIT | OCS_M_ZERO); if (io->sgl == NULL) { ocs_log_err(ocs, "malloc sgl's failed\n"); 
ocs_io_pool_free(io_pool); return NULL; } io->sgl_allocated = num_sgl; io->sgl_count = 0; /* Make IO backend call to initialize IO */ ocs_scsi_tgt_io_init(io); ocs_scsi_ini_io_init(io); rc = ocs_dma_alloc(ocs, &io->els_req, OCS_ELS_REQ_LEN, OCS_MIN_DMA_ALIGNMENT); if (rc) { ocs_log_err(ocs, "ocs_dma_alloc els_req failed\n"); ocs_io_pool_free(io_pool); return NULL; } rc = ocs_dma_alloc(ocs, &io->els_rsp, OCS_ELS_GID_PT_RSP_LEN, OCS_MIN_DMA_ALIGNMENT); if (rc) { ocs_log_err(ocs, "ocs_dma_alloc els_rsp failed\n"); ocs_io_pool_free(io_pool); return NULL; } } return io_pool; } /** * @brief Free IO objects pool * * @par Description * The pool of IO objects are freed. * * @param io_pool Pointer to IO pool object. * * @return Returns 0 on success, or a negative error code value on failure. */ int32_t ocs_io_pool_free(ocs_io_pool_t *io_pool) { ocs_t *ocs; uint32_t i; ocs_io_t *io; if (io_pool != NULL) { ocs = io_pool->ocs; for (i = 0; i < io_pool->io_num_ios; i++) { io = ocs_pool_get_instance(io_pool->pool, i); if (!io) continue; ocs_scsi_tgt_io_exit(io); ocs_scsi_ini_io_exit(io); if (io->sgl) { ocs_free(ocs, io->sgl, sizeof(*io->sgl) * io->sgl_allocated); } ocs_dma_free(ocs, &io->cmdbuf); ocs_dma_free(ocs, &io->rspbuf); ocs_dma_free(ocs, &io->els_req); ocs_dma_free(ocs, &io->els_rsp); } if (io_pool->pool != NULL) { ocs_pool_free(io_pool->pool); } ocs_lock_free(&io_pool->lock); ocs_free(ocs, io_pool, sizeof(*io_pool)); ocs->xport->io_pool = NULL; } return 0; } uint32_t ocs_io_pool_allocated(ocs_io_pool_t *io_pool) { return io_pool->io_num_ios; } /** * @ingroup io_alloc * @brief Allocate an object used to track an IO. * * @param io_pool Pointer to the IO pool. * * @return Returns the pointer to a new object, or NULL if none available. 
*/ ocs_io_t * ocs_io_pool_io_alloc(ocs_io_pool_t *io_pool) { ocs_io_t *io = NULL; ocs_t *ocs; ocs_assert(io_pool, NULL); ocs = io_pool->ocs; ocs_lock(&io_pool->lock); if ((io = ocs_pool_get(io_pool->pool)) != NULL) { ocs_unlock(&io_pool->lock); io->io_type = OCS_IO_TYPE_MAX; io->hio_type = OCS_HW_IO_MAX; io->hio = NULL; io->transferred = 0; io->ocs = ocs; io->timeout = 0; io->sgl_count = 0; io->tgt_task_tag = 0; io->init_task_tag = 0; io->hw_tag = 0; io->display_name = "pending"; io->seq_init = 0; io->els_req_free = 0; io->mgmt_functions = &io_mgmt_functions; io->io_free = 0; ocs_atomic_add_return(&ocs->xport->io_active_count, 1); ocs_atomic_add_return(&ocs->xport->io_total_alloc, 1); } else { ocs_unlock(&io_pool->lock); } return io; } /** * @ingroup io_alloc * @brief Free an object used to track an IO. * * @param io_pool Pointer to IO pool object. * @param io Pointer to the IO object. */ void ocs_io_pool_io_free(ocs_io_pool_t *io_pool, ocs_io_t *io) { ocs_t *ocs; ocs_hw_io_t *hio = NULL; ocs_assert(io_pool); ocs = io_pool->ocs; ocs_lock(&io_pool->lock); hio = io->hio; io->hio = NULL; ocs_pool_put(io_pool->pool, io); ocs_unlock(&io_pool->lock); if (hio) { ocs_hw_io_free(&ocs->hw, hio); } io->io_free = 1; ocs_atomic_sub_return(&ocs->xport->io_active_count, 1); ocs_atomic_add_return(&ocs->xport->io_total_free, 1); } /** * @ingroup io_alloc * @brief Find an I/O given it's node and ox_id. * * @param ocs Driver instance's software context. * @param node Pointer to node. * @param ox_id OX_ID to find. * @param rx_id RX_ID to find (0xffff for unassigned). 
 */
ocs_io_t *
ocs_io_find_tgt_io(ocs_t *ocs, ocs_node_t *node, uint16_t ox_id, uint16_t rx_id)
{
	ocs_io_t	*io = NULL;

	ocs_lock(&node->active_ios_lock);
		/* Match on the initiator's OX_ID; RX_ID 0xffff acts as a
		 * wildcard for exchanges whose RX_ID is not yet assigned. */
		ocs_list_foreach(&node->active_ios, io)
			if ((io->cmd_tgt && (io->init_task_tag == ox_id)) &&
			    ((rx_id == 0xffff) || (io->tgt_task_tag == rx_id))) {
				break;
			}
	ocs_unlock(&node->active_ios_lock);
	return io;
}

/**
 * @ingroup io_alloc
 * @brief Return IO context given the instance index.
 *
 * @par Description
 * Returns a pointer to the IO context given by the instance index.
 *
 * @param ocs Pointer to driver structure.
 * @param index IO instance index to return.
 *
 * @return Returns a pointer to the IO context, or NULL if not found.
 */
ocs_io_t *
ocs_io_get_instance(ocs_t *ocs, uint32_t index)
{
	ocs_xport_t *xport = ocs->xport;
	ocs_io_pool_t *io_pool = xport->io_pool;
	return ocs_pool_get_instance(io_pool->pool, index);
}

/**
 * @brief Generate IO context ddump data.
 *
 * The ddump data for an IO context is generated.
 *
 * @param textbuf Pointer to text buffer.
 * @param io Pointer to IO context.
 *
 * @return None.
 */
void
ocs_ddump_io(ocs_textbuf_t *textbuf, ocs_io_t *io)
{
	ocs_ddump_section(textbuf, "io", io->instance_index);
	ocs_ddump_value(textbuf, "display_name", "%s", io->display_name);
	ocs_ddump_value(textbuf, "node_name", "%s", io->node->display_name);

	ocs_ddump_value(textbuf, "ref_count", "%d", ocs_ref_read_count(&io->ref));
	ocs_ddump_value(textbuf, "io_type", "%d", io->io_type);
	ocs_ddump_value(textbuf, "hio_type", "%d", io->hio_type);
	ocs_ddump_value(textbuf, "cmd_tgt", "%d", io->cmd_tgt);
	ocs_ddump_value(textbuf, "cmd_ini", "%d", io->cmd_ini);
	ocs_ddump_value(textbuf, "send_abts", "%d", io->send_abts);
	ocs_ddump_value(textbuf, "init_task_tag", "0x%x", io->init_task_tag);
	ocs_ddump_value(textbuf, "tgt_task_tag", "0x%x", io->tgt_task_tag);
	ocs_ddump_value(textbuf, "hw_tag", "0x%x", io->hw_tag);
	ocs_ddump_value(textbuf, "tag", "0x%x", io->tag);
	ocs_ddump_value(textbuf, "timeout", "%d", io->timeout);
	ocs_ddump_value(textbuf, "tmf_cmd", "%d", io->tmf_cmd);
	ocs_ddump_value(textbuf, "abort_rx_id", "0x%x", io->abort_rx_id);

	ocs_ddump_value(textbuf, "busy", "%d", ocs_io_busy(io));
	ocs_ddump_value(textbuf, "transferred", "%zu", io->transferred);
	ocs_ddump_value(textbuf, "auto_resp", "%d", io->auto_resp);
	ocs_ddump_value(textbuf, "exp_xfer_len", "%d", io->exp_xfer_len);
	ocs_ddump_value(textbuf, "xfer_req", "%d", io->xfer_req);
	ocs_ddump_value(textbuf, "seq_init", "%d", io->seq_init);

	ocs_ddump_value(textbuf, "alloc_link", "%d", ocs_list_on_list(&io->io_alloc_link));
	ocs_ddump_value(textbuf, "pending_link", "%d", ocs_list_on_list(&io->io_pending_link));
	ocs_ddump_value(textbuf, "backend_link", "%d", ocs_list_on_list(&io->link));

	/* NOTE(review): "hw_tag" is emitted twice (io->hw_tag above and
	 * io->hio->reqtag here) - confirm the duplicate key is intentional. */
	if (io->hio) {
		ocs_ddump_value(textbuf, "hw_tag", "%#x", io->hio->reqtag);
		ocs_ddump_value(textbuf, "hw_xri", "%#x", io->hio->indicator);
		ocs_ddump_value(textbuf, "hw_type", "%#x", io->hio->type);
	} else {
		ocs_ddump_value(textbuf, "hw_tag", "%s", "pending");
		ocs_ddump_value(textbuf, "hw_xri", "%s", "pending");
		ocs_ddump_value(textbuf, "hw_type", "%s", "pending");
	}

	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_IO, io);
	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_IO, io);

	ocs_ddump_endsection(textbuf, "io", io->instance_index);
}

/* Advertise the read-only properties this object exposes to mgmt queries. */
void
ocs_mgmt_io_list(ocs_textbuf_t *textbuf, void *object)
{
	/* Readonly values */
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init_task_tag");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "tag");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "transferred");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "auto_resp");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "exp_xfer_len");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "xfer_req");
}

/* Look up a single named property under "<parent>/io[<n>]/" and emit it.
 * Returns 0 if the name matched and was emitted, -1 otherwise. */
int
ocs_mgmt_io_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
{
	char qualifier[80];
	int retval = -1;
	ocs_io_t *io = (ocs_io_t *) object;

	snprintf(qualifier, sizeof(qualifier), "%s/io[%d]", parent, io->instance_index);

	/* If it doesn't start with my qualifier I don't know what to do with it */
	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
		char *unqualified_name = name + strlen(qualifier) +1;

		/* See if it's a value I can supply */
		if (ocs_strcmp(unqualified_name, "display_name") == 0) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", io->display_name);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "init_task_tag") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "init_task_tag", "0x%x", io->init_task_tag);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "tgt_task_tag") == 0) {
			/* NOTE(review): tgt_task_tag and hw_tag are served here
			 * but not advertised by ocs_mgmt_io_list() - verify. */
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tgt_task_tag", "0x%x", io->tgt_task_tag);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "hw_tag") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_tag", "0x%x", io->hw_tag);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "tag") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tag", "0x%x", io->tag);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "transferred") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "transferred", "%zu", io->transferred);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "auto_resp") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "auto_resp", io->auto_resp);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "exp_xfer_len") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "exp_xfer_len", "%d", io->exp_xfer_len);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "xfer_req") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "xfer_req", "%d", io->xfer_req);
			retval = 0;
		}
	}

	return retval;
}

/* Emit every read-only property of the IO in one pass. */
void
ocs_mgmt_io_get_all(ocs_textbuf_t *textbuf, void *object)
{
	ocs_io_t *io = (ocs_io_t *) object;

	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", io->display_name);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "init_task_tag", "0x%x", io->init_task_tag);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tgt_task_tag", "0x%x", io->tgt_task_tag);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_tag", "0x%x", io->hw_tag);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "tag", "0x%x", io->tag);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "transferred", "%zu", io->transferred);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "auto_resp", io->auto_resp);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "exp_xfer_len", "%d", io->exp_xfer_len);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "xfer_req", "%d", io->xfer_req);
}
Index: head/sys/dev/ocs_fc/ocs_mgmt.c
===================================================================
--- head/sys/dev/ocs_fc/ocs_mgmt.c	(revision 359440)
+++ head/sys/dev/ocs_fc/ocs_mgmt.c	(revision 359441)
@@ -1,2929 +1,2929 @@
/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1.
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * The ocs_mgmt top level functions for Fibre Channel. 
*/ /** * @defgroup mgmt Management Functions */ #include "ocs.h" #include "ocs_mgmt.h" #include "ocs_vpd.h" #define SFP_PAGE_SIZE 128 /* Executables*/ static int ocs_mgmt_firmware_write(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static int ocs_mgmt_firmware_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static int ocs_mgmt_function_reset(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg); static int ocs_mgmt_force_assert(ocs_t *ocs, char *, void *buf, uint32_t buf_len, void*, uint32_t); #if defined(OCS_INCLUDE_RAMD) static int32_t ocs_mgmt_read_phys(ocs_t *ocs, char *, void *, uint32_t , void *, uint32_t); #endif /* Getters */ static void get_nodes_count(ocs_t *, char *, ocs_textbuf_t*); static void get_desc(ocs_t *, char *, ocs_textbuf_t*); static void get_fw_rev(ocs_t *, char *, ocs_textbuf_t*); static void get_fw_rev2(ocs_t *, char *, ocs_textbuf_t*); static void get_ipl(ocs_t *, char *, ocs_textbuf_t*); static void get_wwnn(ocs_t *, char *, ocs_textbuf_t*); static void get_wwpn(ocs_t *, char *, ocs_textbuf_t*); static void get_fcid(ocs_t *, char *, ocs_textbuf_t *); static void get_sn(ocs_t *, char *, ocs_textbuf_t*); static void get_pn(ocs_t *, char *, ocs_textbuf_t*); static void get_sli4_intf_reg(ocs_t *, char *, ocs_textbuf_t*); static void get_phy_port_num(ocs_t *, char *, ocs_textbuf_t*); static void get_asic_id(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_vendor(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_device(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_subsystem_vendor(ocs_t *, char *, ocs_textbuf_t*); static void get_pci_subsystem_device(ocs_t *, char *, ocs_textbuf_t*); static void get_businfo(ocs_t *, char *, ocs_textbuf_t*); static void get_sfp_a0(ocs_t *, char *, ocs_textbuf_t*); static void get_sfp_a2(ocs_t *, char *, ocs_textbuf_t*); 
static void get_hw_rev1(ocs_t *, char *, ocs_textbuf_t*); static void get_hw_rev2(ocs_t *, char *, ocs_textbuf_t*); static void get_hw_rev3(ocs_t *, char *, ocs_textbuf_t*); static void get_debug_mq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_cq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_wq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_debug_eq_dump(ocs_t*, char*, ocs_textbuf_t*); static void get_logmask(ocs_t*, char*, ocs_textbuf_t*); static void get_current_speed(ocs_t*, char*, ocs_textbuf_t*); static void get_current_topology(ocs_t*, char*, ocs_textbuf_t*); static void get_current_link_state(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_speed(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_topology(ocs_t*, char*, ocs_textbuf_t*); static void get_configured_link_state(ocs_t*, char*, ocs_textbuf_t*); static void get_linkcfg(ocs_t*, char*, ocs_textbuf_t*); static void get_req_wwnn(ocs_t*, char*, ocs_textbuf_t*); static void get_req_wwpn(ocs_t*, char*, ocs_textbuf_t*); static void get_nodedb_mask(ocs_t*, char*, ocs_textbuf_t*); static void get_profile_list(ocs_t*, char*, ocs_textbuf_t*); static void get_active_profile(ocs_t*, char*, ocs_textbuf_t*); static void get_port_protocol(ocs_t*, char*, ocs_textbuf_t*); static void get_driver_version(ocs_t*, char*, ocs_textbuf_t*); static void get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t 
*textbuf); static void get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf); /* Setters */ static int set_debug_mq_dump(ocs_t*, char*, char*); static int set_debug_cq_dump(ocs_t*, char*, char*); static int set_debug_wq_dump(ocs_t*, char*, char*); static int set_debug_eq_dump(ocs_t*, char*, char*); static int set_logmask(ocs_t*, char*, char*); static int set_configured_link_state(ocs_t*, char*, char*); static int set_linkcfg(ocs_t*, char*, char*); static int set_nodedb_mask(ocs_t*, char*, char*); static int set_port_protocol(ocs_t*, char*, char*); static int set_active_profile(ocs_t*, char*, char*); static int set_tgt_rscn_delay(ocs_t*, char*, char*); static int set_tgt_rscn_period(ocs_t*, char*, char*); static int set_inject_drop_cmd(ocs_t*, char*, char*); static int set_inject_free_drop_cmd(ocs_t*, char*, char*); static int set_inject_drop_data(ocs_t*, char*, char*); static int set_inject_drop_resp(ocs_t*, char*, char*); static int set_cmd_err_inject(ocs_t*, char*, char*); static int set_cmd_delay_value(ocs_t*, char*, char*); static int set_nv_wwn(ocs_t*, char*, char*); static int set_loglevel(ocs_t*, char*, char*); static void ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg); #if defined(OCS_INCLUDE_RAMD) static void* find_address_in_target(ocs_ramdisc_t **ramdisc_array, uint32_t ramdisc_count, uintptr_t target_addr); #endif ocs_mgmt_table_entry_t mgmt_table[] = { {"nodes_count", get_nodes_count, NULL, NULL}, {"desc", get_desc, NULL, NULL}, {"fw_rev", get_fw_rev, NULL, NULL}, {"fw_rev2", get_fw_rev2, NULL, NULL}, {"ipl", get_ipl, NULL, NULL}, {"hw_rev1", get_hw_rev1, NULL, NULL}, {"hw_rev2", get_hw_rev2, NULL, NULL}, {"hw_rev3", 
get_hw_rev3, NULL, NULL}, {"wwnn", get_wwnn, NULL, NULL}, {"wwpn", get_wwpn, NULL, NULL}, {"fc_id", get_fcid, NULL, NULL}, {"sn", get_sn, NULL, NULL}, {"pn", get_pn, NULL, NULL}, {"sli4_intf_reg", get_sli4_intf_reg, NULL, NULL}, {"phy_port_num", get_phy_port_num, NULL, NULL}, {"asic_id_reg", get_asic_id, NULL, NULL}, {"pci_vendor", get_pci_vendor, NULL, NULL}, {"pci_device", get_pci_device, NULL, NULL}, {"pci_subsystem_vendor", get_pci_subsystem_vendor, NULL, NULL}, {"pci_subsystem_device", get_pci_subsystem_device, NULL, NULL}, {"businfo", get_businfo, NULL, NULL}, {"sfp_a0", get_sfp_a0, NULL, NULL}, {"sfp_a2", get_sfp_a2, NULL, NULL}, {"profile_list", get_profile_list, NULL, NULL}, {"driver_version", get_driver_version, NULL, NULL}, {"current_speed", get_current_speed, NULL, NULL}, {"current_topology", get_current_topology, NULL, NULL}, {"current_link_state", get_current_link_state, NULL, NULL}, {"chip_type", get_chip_type, NULL, NULL}, {"configured_speed", get_configured_speed, set_configured_speed, NULL}, {"configured_topology", get_configured_topology, set_configured_topology, NULL}, {"configured_link_state", get_configured_link_state, set_configured_link_state, NULL}, {"debug_mq_dump", get_debug_mq_dump, set_debug_mq_dump, NULL}, {"debug_cq_dump", get_debug_cq_dump, set_debug_cq_dump, NULL}, {"debug_wq_dump", get_debug_wq_dump, set_debug_wq_dump, NULL}, {"debug_eq_dump", get_debug_eq_dump, set_debug_eq_dump, NULL}, {"logmask", get_logmask, set_logmask, NULL}, {"loglevel", get_loglevel, set_loglevel, NULL}, {"linkcfg", get_linkcfg, set_linkcfg, NULL}, {"requested_wwnn", get_req_wwnn, set_req_wwnn, NULL}, {"requested_wwpn", get_req_wwpn, set_req_wwpn, NULL}, {"nodedb_mask", get_nodedb_mask, set_nodedb_mask, NULL}, {"port_protocol", get_port_protocol, set_port_protocol, NULL}, {"active_profile", get_active_profile, set_active_profile, NULL}, {"firmware_write", NULL, NULL, ocs_mgmt_firmware_write}, {"firmware_reset", NULL, NULL, ocs_mgmt_firmware_reset}, 
{"function_reset", NULL, NULL, ocs_mgmt_function_reset}, #if defined(OCS_INCLUDE_RAMD) {"read_phys", NULL, NULL, ocs_mgmt_read_phys}, #endif {"force_assert", NULL, NULL, ocs_mgmt_force_assert}, {"tgt_rscn_delay", get_tgt_rscn_delay, set_tgt_rscn_delay, NULL}, {"tgt_rscn_period", get_tgt_rscn_period, set_tgt_rscn_period, NULL}, {"inject_drop_cmd", get_inject_drop_cmd, set_inject_drop_cmd, NULL}, {"inject_free_drop_cmd", get_inject_free_drop_cmd, set_inject_free_drop_cmd, NULL}, {"inject_drop_data", get_inject_drop_data, set_inject_drop_data, NULL}, {"inject_drop_resp", get_inject_drop_resp, set_inject_drop_resp, NULL}, {"cmd_err_inject", get_cmd_err_inject, set_cmd_err_inject, NULL}, {"cmd_delay_value", get_cmd_delay_value, set_cmd_delay_value, NULL}, {"nv_wwpn", get_nv_wwpn, NULL, NULL}, {"nv_wwnn", get_nv_wwnn, NULL, NULL}, {"nv_wwn", NULL, set_nv_wwn, NULL}, {"node_abort_cnt", get_node_abort_cnt, NULL, NULL}, }; /** * @ingroup mgmt * @brief Get a list of options supported by the driver. * * @par Description * This is the top level "get list" handler for the driver. It * performs the following: * - Adds entries to the textbuf for any actions supported by this level in the driver. * - Calls a back-end function to add any actions supported by the back-end. * - Calls a function on each child (domain) to recursively add supported actions. * * @param ocs Pointer to the ocs structure. * @param textbuf Pointer to an ocs_textbuf, which is used to accumulate the results. * * @return Returns 0 on success, or a negative value on failure. 
*/
void
ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf)
{
	ocs_domain_t *domain;
	uint32_t i;
	int access;

	ocs_mgmt_start_unnumbered_section(textbuf, "ocs");

	/* Emit every entry of the driver-level table along with its
	 * read/write/execute access mode, derived from which handlers
	 * the entry provides. */
	for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
		access = 0;
		if (mgmt_table[i].get_handler) {
			access |= MGMT_MODE_RD;
		}
		if (mgmt_table[i].set_handler) {
			access |= MGMT_MODE_WR;
		}
		if (mgmt_table[i].action_handler) {
			access |= MGMT_MODE_EX;
		}
		ocs_mgmt_emit_property_name(textbuf, access, mgmt_table[i].name);
	}

	/* Let the initiator and target back-ends add their own entries */
	if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_list_handler)) {
		ocs->mgmt_functions->get_list_handler(textbuf, ocs);
	}

	if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_list_handler)) {
		ocs->tgt_mgmt_functions->get_list_handler(textbuf, &(ocs->tgt_ocs));
	}

	/* Have each of my children add their actions.  Uses a try-lock so a
	 * list request never blocks behind other device-level activity. */
	if (ocs_device_lock_try(ocs) == TRUE) {
		/* If we get here then we are holding the device lock */
		ocs_list_foreach(&ocs->domain_list, domain) {
			if ((domain->mgmt_functions) && (domain->mgmt_functions->get_list_handler)) {
				domain->mgmt_functions->get_list_handler(textbuf, domain);
			}
		}
		ocs_device_unlock(ocs);
	}

	ocs_mgmt_end_unnumbered_section(textbuf, "ocs");
}

/**
 * @ingroup mgmt
 * @brief Return the value of a management item.
 *
 * @par Description
 * This is the top level "get" handler for the driver. It
 * performs the following:
 *  - Checks that the qualifier portion of the name begins with my qualifier (ocs).
 *  - If the remaining part of the name matches a parameter that is known at this level,
 *    writes the value into textbuf.
 *  - If the name is not known, sends the request to the back-ends to fulfill (if possible).
 *  - If the request has not been fulfilled by the back-end,
 *    passes the request to each of the children (domains) to
 *    have them (recursively) try to respond.
 *
 * In passing the request to other entities, the request is considered to be answered
 * when a response has been written into textbuf, indicated by textbuf->buffer_written
 * being non-zero.
 *
 * @param ocs Pointer to the ocs structure.
 * @param name Name of the status item to be retrieved.
 * @param textbuf Pointer to an ocs_textbuf, which is used to return the results.
 *
 * @return Returns 0 if the value was found and returned, or -1 if an error occurred.
*/
int
ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
{
	ocs_domain_t *domain;
	char qualifier[6];
	int retval = -1;
	uint32_t i;

	ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
	snprintf(qualifier, sizeof(qualifier), "/ocs");

	/* See if the name starts with my qualifier.  If not then this request isn't for me */
	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
		char *unqualified_name = name + strlen(qualifier) + 1;

		/* First try the driver-level table for a matching entry with
		 * a get handler; a hit answers the request immediately. */
		for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
			if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
				if (mgmt_table[i].get_handler) {
					mgmt_table[i].get_handler(ocs, name, textbuf);
					retval = 0;
				}
				break;
			}
		}

		/* Not handled here: offer it to the initiator back-end... */
		if (retval != 0) {
			if ((ocs->mgmt_functions) && (ocs->mgmt_functions->get_handler)) {
				retval = ocs->mgmt_functions->get_handler(textbuf,
						qualifier, (char*)name, ocs);
			}
		}

		/* ...then to the target back-end... */
		if (retval != 0) {
			if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_handler)) {
				retval = ocs->tgt_mgmt_functions->get_handler(textbuf,
						qualifier, (char*)name, &(ocs->tgt_ocs));
			}
		}

		if (retval != 0) {
			/* The driver didn't handle it, pass it to each domain */
			ocs_device_lock(ocs);
			ocs_list_foreach(&ocs->domain_list, domain) {
				if ((domain->mgmt_functions) && (domain->mgmt_functions->get_handler)) {
					retval = domain->mgmt_functions->get_handler(textbuf,
							qualifier, (char*)name, domain);
				}
				if (retval == 0) {
					break;
				}
			}
			ocs_device_unlock(ocs);
		}
	}

	ocs_mgmt_end_unnumbered_section(textbuf, "ocs");

	return retval;
}

/**
 * @ingroup mgmt
 * @brief Set the value of a mgmt item.
 *
 * @par Description
 * This is the top level "set" handler for the driver. It
 * performs the following:
 *  - Checks that the qualifier portion of the name begins with my qualifier (ocs).
 *  - If the remaining part of the name matches a parameter that is known at this level,
 *    calls the correct function to change the configuration.
 *  - If the name is not known, sends the request to the back-ends to fulfill (if possible).
 *  - If the request has not been fulfilled by the back-end, passes the request to each of the
 *    children (domains) to have them (recursively) try to respond.
 *
 * In passing the request to other entities, the request is considered to be handled
 * if the function returns 0.
* * @param ocs Pointer to the ocs structure. * @param name Name of the property to be changed. * @param value Requested new value of the property. * * @return Returns 0 if the configuration value was updated, or -1 otherwise. */ int ocs_mgmt_set(ocs_t *ocs, char *name, char *value) { ocs_domain_t *domain; int result = -1; char qualifier[80]; uint32_t i; snprintf(qualifier, sizeof(qualifier), "/ocs"); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = name + strlen(qualifier) +1; /* See if it's a value I can set */ for (i=0;imgmt_functions) && (ocs->mgmt_functions->set_handler)) { result = ocs->mgmt_functions->set_handler(qualifier, name, (char *)value, ocs); } if (result != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->set_handler)) { result = ocs->tgt_mgmt_functions->set_handler(qualifier, name, (char *)value, &(ocs->tgt_ocs)); } } /* If I didn't know how to set this config value pass the request to each of my children */ if (result != 0) { ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->set_handler)) { result = domain->mgmt_functions->set_handler(qualifier, name, (char*)value, domain); } if (result == 0) { break; } } ocs_device_unlock(ocs); } } return result; } /** * @ingroup mgmt * @brief Perform a management action. * * @par Description * This is the top level "exec" handler for the driver. It * performs the following: * - Checks that the qualifier portion of the name begins with my qualifier (ocs). * - If the remaining part of the name matches an action that is known at this level, * calls the correct function to perform the action. * - If the name is not known, sends the request to the back-ends to fulfill (if possible). 
* - If the request has not been fulfilled by the back-end, passes the request to each of the * children (domains) to have them (recursively) try to respond. * * In passing the request to other entities, the request is considered to be handled * if the function returns 0. * * @param ocs Pointer to the ocs structure. * @param action Name of the action to be performed. * @param arg_in Pointer to an argument being passed to the action. * @param arg_in_length Length of the argument pointed to by @c arg_in. * @param arg_out Pointer to an argument being passed to the action. * @param arg_out_length Length of the argument pointed to by @c arg_out. * * @return Returns 0 if the action was completed, or -1 otherwise. * * */ int ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length) { ocs_domain_t *domain; int result = -1; char qualifier[80]; uint32_t i; snprintf(qualifier, sizeof(qualifier), "/ocs"); /* If it doesn't start with my qualifier I don't know what to do with it */ if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) { char *unqualified_name = action + strlen(qualifier) +1; /* See if it's an action I can perform */ for (i=0;imgmt_functions) && (ocs->mgmt_functions->exec_handler)) { result = ocs->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, ocs); } if (result != 0) { if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->exec_handler)) { result = ocs->tgt_mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, &(ocs->tgt_ocs)); } } /* If I didn't know how to do this action pass the request to each of my children */ if (result != 0) { ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->exec_handler)) { result = domain->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, domain); } if (result 
== 0) { break; } } ocs_device_unlock(ocs); } } return result; } void ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf) { ocs_domain_t *domain; uint32_t i; ocs_mgmt_start_unnumbered_section(textbuf, "ocs"); for (i=0;imgmt_functions) && (ocs->mgmt_functions->get_all_handler)) { ocs->mgmt_functions->get_all_handler(textbuf, ocs); } if ((ocs->tgt_mgmt_functions) && (ocs->tgt_mgmt_functions->get_all_handler)) { ocs->tgt_mgmt_functions->get_all_handler(textbuf, &(ocs->tgt_ocs)); } ocs_device_lock(ocs); ocs_list_foreach(&ocs->domain_list, domain) { if ((domain->mgmt_functions) && (domain->mgmt_functions->get_all_handler)) { domain->mgmt_functions->get_all_handler(textbuf, domain); } } ocs_device_unlock(ocs); ocs_mgmt_end_unnumbered_section(textbuf, "ocs"); } #if defined(OCS_INCLUDE_RAMD) static int32_t ocs_mgmt_read_phys(ocs_t *ocs, char *name, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length) { uint32_t length; char addr_str[80]; uintptr_t target_addr; void* vaddr = NULL; ocs_ramdisc_t **ramdisc_array; uint32_t ramdisc_count; if ((arg_in == NULL) || (arg_in_length == 0) || (arg_out == NULL) || (arg_out_length == 0)) { return -1; } if (arg_in_length > 80) { arg_in_length = 80; } if (ocs_copy_from_user(addr_str, arg_in, arg_in_length)) { ocs_log_test(ocs, "Failed to copy addr from user\n"); return -EFAULT; } target_addr = (uintptr_t)ocs_strtoul(addr_str, NULL, 0); /* addr_str must be the physical address of a buffer that was reported * in an SGL. Search ramdiscs looking for a segment that contains that * physical address */ if (ocs->tgt_ocs.use_global_ramd) { /* Only one target */ ramdisc_count = ocs->tgt_ocs.rdisc_count; ramdisc_array = ocs->tgt_ocs.rdisc; vaddr = find_address_in_target(ramdisc_array, ramdisc_count, target_addr); } else { /* Multiple targets. 
Each target is on a sport */ uint32_t domain_idx; for (domain_idx=0; domain_idxdomain_instance_count; domain_idx++) { ocs_domain_t *domain; uint32_t sport_idx; domain = ocs_domain_get_instance(ocs, domain_idx); for (sport_idx=0; sport_idx < domain->sport_instance_count; sport_idx++) { ocs_sport_t *sport; sport = ocs_sport_get_instance(domain, sport_idx); ramdisc_count = sport->tgt_sport.rdisc_count; ramdisc_array = sport->tgt_sport.rdisc; vaddr = find_address_in_target(ramdisc_array, ramdisc_count, target_addr); if (vaddr != NULL) { break; } } } } length = arg_out_length; if (vaddr != NULL) { if (ocs_copy_to_user(arg_out, vaddr, length)) { ocs_log_test(ocs, "Failed to copy buffer to user\n"); return -EFAULT; } return 0; } else { return -EFAULT; } } /* * This function searches a target for a given physical address. * The target is made up of a number of LUNs, each represented by * a ocs_ramdisc_t. */ static void* find_address_in_target(ocs_ramdisc_t **ramdisc_array, uint32_t ramdisc_count, uintptr_t target_addr) { void *vaddr = NULL; uint32_t ramdisc_idx; /* Check each ramdisc */ for (ramdisc_idx=0; ramdisc_idxsegment_count; segment_idx++) { ramdisc_segment_t *segment = rdisc->segments[segment_idx]; uintptr_t segment_start; uintptr_t segment_end; uint32_t offset; segment_start = segment->data_segment.phys; segment_end = segment->data_segment.phys + segment->data_segment.size - 1; if ((target_addr >= segment_start) && (target_addr <= segment_end)) { /* Found the target address */ offset = target_addr - segment_start; vaddr = (uint32_t*)segment->data_segment.virt + offset; } if (rdisc->dif_separate) { segment_start = segment->dif_segment.phys; segment_end = segment->data_segment.phys + segment->dif_segment.size - 1; if ((target_addr >= segment_start) && (target_addr <= segment_end)) { /* Found the target address */ offset = target_addr - segment_start; vaddr = (uint32_t*)segment->dif_segment.virt + offset; } } if (vaddr != NULL) { break; } } if (vaddr != NULL) { 
break; } } return vaddr; } #endif static int32_t ocs_mgmt_firmware_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int rc = 0; int index = 0; uint8_t bus, dev, func; ocs_t *other_ocs; ocs_get_bus_dev_func(ocs, &bus, &dev, &func); ocs_log_debug(ocs, "Resetting port\n"); if (ocs_hw_reset(&ocs->hw, OCS_HW_RESET_FIRMWARE)) { ocs_log_test(ocs, "failed to reset port\n"); rc = -1; } else { ocs_log_debug(ocs, "successfully reset port\n"); /* now reset all functions on the same device */ while ((other_ocs = ocs_get_instance(index++)) != NULL) { uint8_t other_bus, other_dev, other_func; ocs_get_bus_dev_func(other_ocs, &other_bus, &other_dev, &other_func); if ((bus == other_bus) && (dev == other_dev)) { if (other_ocs->hw.state != OCS_HW_STATE_UNINITIALIZED) { other_ocs->hw.state = OCS_HW_STATE_QUEUES_ALLOCATED; } ocs_device_detach(other_ocs); if (ocs_device_attach(other_ocs)) { ocs_log_err(other_ocs, "device %d attach failed \n", index); rc = -1; } } } } return rc; } static int32_t ocs_mgmt_function_reset(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int32_t rc; ocs_device_detach(ocs); rc = ocs_device_attach(ocs); return rc; } static int32_t ocs_mgmt_firmware_write(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { int rc = 0; uint32_t bytes_left; uint32_t xfer_size; uint32_t offset; uint8_t *userp; ocs_dma_t dma; int last = 0; ocs_mgmt_fw_write_result_t result; uint32_t change_status = 0; char status_str[80]; ocs_sem_init(&(result.semaphore), 0, "fw_write"); bytes_left = buf_len; offset = 0; userp = (uint8_t *)buf; if (ocs_dma_alloc(ocs, &dma, FW_WRITE_BUFSIZE, 4096)) { ocs_log_err(ocs, "ocs_mgmt_firmware_write: malloc failed"); return -ENOMEM; } while (bytes_left > 0) { if (bytes_left > FW_WRITE_BUFSIZE) { xfer_size = FW_WRITE_BUFSIZE; } else { xfer_size = bytes_left; } /* Copy xfer_size bytes from user space to kernel buffer */ 
if (ocs_copy_from_user(dma.virt, userp, xfer_size)) { rc = -EFAULT; break; } /* See if this is the last block */ if (bytes_left == xfer_size) { last = 1; } /* Send the HW command */ ocs_hw_firmware_write(&ocs->hw, &dma, xfer_size, offset, last, ocs_mgmt_fw_write_cb, &result); /* Wait for semaphore to be signaled when the command completes * TODO: Should there be a timeout on this? If so, how long? */ if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { ocs_log_err(ocs, "ocs_sem_p failed\n"); rc = -ENXIO; break; } if (result.actual_xfer == 0) { ocs_log_test(ocs, "actual_write_length is %d\n", result.actual_xfer); rc = -EFAULT; break; } /* Check status */ if (result.status != 0) { ocs_log_test(ocs, "write returned status %d\n", result.status); rc = -EFAULT; break; } if (last) { change_status = result.change_status; } bytes_left -= result.actual_xfer; offset += result.actual_xfer; userp += result.actual_xfer; } /* Create string with status and copy to userland */ if ((arg_out_length > 0) && (arg_out != NULL)) { if (arg_out_length > sizeof(status_str)) { arg_out_length = sizeof(status_str); } ocs_memset(status_str, 0, sizeof(status_str)); ocs_snprintf(status_str, arg_out_length, "%d", change_status); if (ocs_copy_to_user(arg_out, status_str, arg_out_length)) { ocs_log_test(ocs, "copy to user failed for change_status\n"); } } ocs_dma_free(ocs, &dma); return rc; } static void ocs_mgmt_fw_write_cb(int32_t status, uint32_t actual_write_length, uint32_t change_status, void *arg) { ocs_mgmt_fw_write_result_t *result = arg; result->status = status; result->actual_xfer = actual_write_length; result->change_status = change_status; ocs_sem_v(&(result->semaphore)); } typedef struct ocs_mgmt_sfp_result { ocs_sem_t semaphore; ocs_lock_t cb_lock; int32_t running; int32_t status; uint32_t bytes_read; uint32_t page_data[32]; } ocs_mgmt_sfp_result_t; static void ocs_mgmt_sfp_cb(void *os, int32_t status, uint32_t bytes_read, uint32_t *data, void *arg) { ocs_mgmt_sfp_result_t 
*result = arg; ocs_t *ocs = os; ocs_lock(&(result->cb_lock)); result->running++; if(result->running == 2) { /* get_sfp() has timed out */ ocs_unlock(&(result->cb_lock)); ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t)); return; } result->status = status; result->bytes_read = bytes_read; ocs_memcpy(&result->page_data, data, SFP_PAGE_SIZE); ocs_sem_v(&(result->semaphore)); ocs_unlock(&(result->cb_lock)); } static int32_t ocs_mgmt_get_sfp(ocs_t *ocs, uint16_t page, void *buf, uint32_t buf_len) { int rc = 0; - ocs_mgmt_sfp_result_t *result = ocs_malloc(ocs, sizeof(ocs_mgmt_sfp_result_t), OCS_M_ZERO | OCS_M_NOWAIT);; + ocs_mgmt_sfp_result_t *result = ocs_malloc(ocs, sizeof(ocs_mgmt_sfp_result_t), OCS_M_ZERO | OCS_M_NOWAIT); ocs_sem_init(&(result->semaphore), 0, "get_sfp"); ocs_lock_init(ocs, &(result->cb_lock), "get_sfp"); /* Send the HW command */ ocs_hw_get_sfp(&ocs->hw, page, ocs_mgmt_sfp_cb, result); /* Wait for semaphore to be signaled when the command completes */ if (ocs_sem_p(&(result->semaphore), 5 * 1000 * 1000) != 0) { /* Timed out, callback will free memory */ ocs_lock(&(result->cb_lock)); result->running++; if(result->running == 1) { ocs_log_err(ocs, "ocs_sem_p failed\n"); ocs_unlock(&(result->cb_lock)); return (-ENXIO); } /* sfp_cb() has already executed, proceed as normal */ ocs_unlock(&(result->cb_lock)); } /* Check status */ if (result->status != 0) { ocs_log_test(ocs, "read_transceiver_data returned status %d\n", result->status); rc = -EFAULT; } if (rc == 0) { rc = (result->bytes_read > buf_len ? 
buf_len : result->bytes_read); /* Copy the results back to the supplied buffer */ ocs_memcpy(buf, result->page_data, rc); } ocs_free(ocs, result, sizeof(ocs_mgmt_sfp_result_t)); return rc; } static int32_t ocs_mgmt_force_assert(ocs_t *ocs, char *name, void *buf, uint32_t buf_len, void *arg_out, uint32_t arg_out_length) { ocs_assert(FALSE, 0); } static void get_nodes_count(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "nodes_count", "%d", xport->nodes_count); } static void get_driver_version(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "driver_version", ocs->driver_version); } static void get_desc(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "desc", ocs->desc); } static void get_fw_rev(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV)); } static void get_fw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "fw_rev2", ocs_hw_get_ptr(&ocs->hw, OCS_HW_FW_REV2)); } static void get_ipl(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "ipl", ocs_hw_get_ptr(&ocs->hw, OCS_HW_IPL)); } static void get_hw_rev1(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV1, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev1", "%u", value); } static void get_hw_rev2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV2, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev2", "%u", value); } static void get_hw_rev3(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&ocs->hw, OCS_HW_HW_REV3, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "hw_rev3", "%u", value); } static void get_wwnn(ocs_t *ocs, 
char *name, ocs_textbuf_t *textbuf) { uint64_t *wwnn; wwnn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_NODE); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%llx", (unsigned long long)ocs_htobe64(*wwnn)); } static void get_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint64_t *wwpn; wwpn = ocs_hw_get_ptr(&ocs->hw, OCS_HW_WWN_PORT); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%llx", (unsigned long long)ocs_htobe64(*wwpn)); } static void get_fcid(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { if (ocs->domain && ocs->domain->attached) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", ocs->domain->sport->fc_id); } else { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "UNKNOWN"); } } static void get_sn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *pserial; uint32_t len; char sn_buf[256]; pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_SERIALNUMBER); if (pserial) { len = *pserial ++; strncpy(sn_buf, (char*)pserial, len); sn_buf[len] = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sn", sn_buf); } } static void get_pn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *pserial; uint32_t len; char sn_buf[256]; pserial = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PARTNUMBER); if (pserial) { len = *pserial ++; strncpy(sn_buf, (char*)pserial, len); sn_buf[len] = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", sn_buf); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pn", ocs->model); } } static void get_sli4_intf_reg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "sli4_intf_reg", "0x%04x", ocs_config_read32(ocs, SLI4_INTF_REG)); } static void get_phy_port_num(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char *phy_port = NULL; phy_port = ocs_scsi_get_property_ptr(ocs, OCS_SCSI_PORTNUM); if (phy_port) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", phy_port); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "phy_port_num", "unknown"); } } 
static void get_asic_id(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "asic_id_reg", "0x%04x", ocs_config_read32(ocs, SLI4_ASIC_ID_REG)); } static void get_chip_type(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t family; uint32_t asic_id; uint32_t asic_gen_num; uint32_t asic_rev_num; uint32_t rev_id; char result_buf[80]; char tmp_buf[80]; family = (ocs_config_read32(ocs, SLI4_INTF_REG) & 0x00000f00) >> 8; asic_id = ocs_config_read32(ocs, SLI4_ASIC_ID_REG); asic_rev_num = asic_id & 0xff; asic_gen_num = (asic_id & 0xff00) >> 8; rev_id = ocs_config_read32(ocs, SLI4_PCI_CLASS_REVISION) & 0xff; switch(family) { case 0x00: /* BE2 */ ocs_strncpy(result_buf, "BE2 A", sizeof(result_buf)); ocs_snprintf(tmp_buf, 2, "%d", rev_id); strcat(result_buf, tmp_buf); break; case 0x01: /* BE3 */ ocs_strncpy(result_buf, "BE3", sizeof(result_buf)); if (rev_id >= 0x10) { strcat(result_buf, "-R"); } ocs_snprintf(tmp_buf, 3, " %c", ((rev_id & 0xf0) >> 4) + 'A'); strcat(result_buf, tmp_buf); ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f); strcat(result_buf, tmp_buf); break; case 0x02: /* Skyhawk A0 */ ocs_strncpy(result_buf, "Skyhawk A0", sizeof(result_buf)); break; case 0x0a: /* Lancer A0 */ ocs_strncpy(result_buf, "Lancer A", sizeof(result_buf)); ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f); strcat(result_buf, tmp_buf); break; case 0x0b: /* Lancer B0 or D0 */ ocs_strncpy(result_buf, "Lancer", sizeof(result_buf)); ocs_snprintf(tmp_buf, 3, " %c", ((rev_id & 0xf0) >> 4) + 'A'); strcat(result_buf, tmp_buf); ocs_snprintf(tmp_buf, 2, "%d", rev_id & 0x0f); strcat(result_buf, tmp_buf); break; case 0x0c: ocs_strncpy(result_buf, "Lancer G6", sizeof(result_buf)); break; case 0x0f: /* Refer to ASIC_ID */ switch(asic_gen_num) { case 0x00: ocs_strncpy(result_buf, "BE2", sizeof(result_buf)); break; case 0x03: ocs_strncpy(result_buf, "BE3-R", sizeof(result_buf)); break; case 0x04: ocs_strncpy(result_buf, "Skyhawk-R", sizeof(result_buf)); break; case 
0x05: ocs_strncpy(result_buf, "Corsair", sizeof(result_buf)); break; case 0x0b: ocs_strncpy(result_buf, "Lancer", sizeof(result_buf)); break; case 0x0c: ocs_strncpy(result_buf, "LancerG6", sizeof(result_buf)); break; default: ocs_strncpy(result_buf, "Unknown", sizeof(result_buf)); } if (ocs_strcmp(result_buf, "Unknown") != 0) { ocs_snprintf(tmp_buf, 3, " %c", ((asic_rev_num & 0xf0) >> 4) + 'A'); strcat(result_buf, tmp_buf); ocs_snprintf(tmp_buf, 2, "%d", asic_rev_num & 0x0f); strcat(result_buf, tmp_buf); } break; default: ocs_strncpy(result_buf, "Unknown", sizeof(result_buf)); } ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "chip_type", result_buf); } static void get_pci_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_vendor", "0x%04x", ocs->pci_vendor); } static void get_pci_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_device", "0x%04x", ocs->pci_device); } static void get_pci_subsystem_vendor(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_vendor", "0x%04x", ocs->pci_subsystem_vendor); } static void get_pci_subsystem_device(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "pci_subsystem_device", "0x%04x", ocs->pci_subsystem_device); } static void get_tgt_rscn_delay(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_delay", "%ld", (unsigned long)ocs->tgt_rscn_delay_msec / 1000); } static void get_tgt_rscn_period(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "tgt_rscn_period", "%ld", (unsigned long)ocs->tgt_rscn_period_msec / 1000); } static void get_inject_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_cmd", "%d", (ocs->err_injection == INJECT_DROP_CMD ? 
1:0)); } static void get_inject_free_drop_cmd(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_free_drop_cmd", "%d", (ocs->err_injection == INJECT_FREE_DROPPED ? 1:0)); } static void get_inject_drop_data(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_data", "%d", (ocs->err_injection == INJECT_DROP_DATA ? 1:0)); } static void get_inject_drop_resp(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "inject_drop_resp", "%d", (ocs->err_injection == INJECT_DROP_RESP ? 1:0)); } static void get_cmd_err_inject(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_err_inject", "0x%02x", ocs->cmd_err_inject); } static void get_cmd_delay_value(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "cmd_delay_value", "%ld", (unsigned long)ocs->delay_value_msec); } static void get_businfo(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "businfo", ocs->businfo); } static void get_sfp_a0(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *page_data; char *buf; int i; int32_t bytes_read; page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (page_data == NULL) { return; } buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT); if (buf == NULL) { ocs_free(ocs, page_data, SFP_PAGE_SIZE); return; } bytes_read = ocs_mgmt_get_sfp(ocs, 0xa0, page_data, SFP_PAGE_SIZE); if (bytes_read <= 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a0", "(unknown)"); } else { char *d = buf; uint8_t *s = page_data; int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1; int bytes_added; for (i = 0; i < bytes_read; i++) { bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s); ++s; d += bytes_added; buffer_remaining -= bytes_added; } *d = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, 
"sfp_a0", buf); } ocs_free(ocs, page_data, SFP_PAGE_SIZE); ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1); } static void get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint8_t *page_data; char *buf; int i; int32_t bytes_read; page_data = ocs_malloc(ocs, SFP_PAGE_SIZE, OCS_M_ZERO | OCS_M_NOWAIT); if (page_data == NULL) { return; } buf = ocs_malloc(ocs, (SFP_PAGE_SIZE * 3) + 1, OCS_M_ZERO | OCS_M_NOWAIT); if (buf == NULL) { ocs_free(ocs, page_data, SFP_PAGE_SIZE); return; } bytes_read = ocs_mgmt_get_sfp(ocs, 0xa2, page_data, SFP_PAGE_SIZE); if (bytes_read <= 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", "(unknown)"); } else { char *d = buf; uint8_t *s = page_data; int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1; int bytes_added; for (i=0; i < bytes_read; i++) { bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s); ++s; d += bytes_added; buffer_remaining -= bytes_added; } *d = '\0'; ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "sfp_a2", buf); } ocs_free(ocs, page_data, SFP_PAGE_SIZE); ocs_free(ocs, buf, (3 * SFP_PAGE_SIZE) + 1); } static void get_debug_mq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_mq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_MQ_DUMP)); } static void get_debug_cq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_cq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_CQ_DUMP)); } static void get_debug_wq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_wq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_WQ_DUMP)); } static void get_debug_eq_dump(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RW, "debug_eq_dump", ocs_debug_is_enabled(OCS_DEBUG_ENABLE_EQ_DUMP)); } static void get_logmask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "logmask", "0x%02x", ocs->logmask); } 
static void get_loglevel(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "loglevel", "%d", loglevel); } static void get_current_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_LINK_SPEED, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_speed", "%d", value); } static void get_configured_speed(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_LINK_CONFIG_SPEED, &value); if (value == 0) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_speed", "auto"); } else { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_speed", "%d", value); } } static void get_current_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_TOPOLOGY, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "current_topology", "%d", value); } static void get_configured_topology(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t value; ocs_hw_get(&(ocs->hw), OCS_HW_CONFIG_TOPOLOGY, &value); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "configured_topology", "%d", value); } static void get_current_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_stats_t value; if (ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value) == 0) { if (value.value == OCS_XPORT_PORT_ONLINE) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "online"); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_link_state", "offline"); } } } static void get_configured_link_state(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_stats_t value; if (ocs_xport_status(ocs->xport, OCS_XPORT_CONFIG_PORT_STATUS, &value) == 0) { if (value.value == OCS_XPORT_PORT_ONLINE) { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "online"); } else { ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "configured_link_state", "offline"); } } } /** * @brief HW link 
config enum to mgmt string value mapping. * * This structure provides a mapping from the ocs_hw_linkcfg_e * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port * control) to the mgmt string that is passed in by the mgmt application * (elxsdkutil). */ typedef struct ocs_mgmt_linkcfg_map_s { ocs_hw_linkcfg_e linkcfg; const char *mgmt_str; } ocs_mgmt_linkcfg_map_t; static ocs_mgmt_linkcfg_map_t mgmt_linkcfg_map[] = { {OCS_HW_LINKCFG_4X10G, OCS_CONFIG_LINKCFG_4X10G}, {OCS_HW_LINKCFG_1X40G, OCS_CONFIG_LINKCFG_1X40G}, {OCS_HW_LINKCFG_2X16G, OCS_CONFIG_LINKCFG_2X16G}, {OCS_HW_LINKCFG_4X8G, OCS_CONFIG_LINKCFG_4X8G}, {OCS_HW_LINKCFG_4X1G, OCS_CONFIG_LINKCFG_4X1G}, {OCS_HW_LINKCFG_2X10G, OCS_CONFIG_LINKCFG_2X10G}, {OCS_HW_LINKCFG_2X10G_2X8G, OCS_CONFIG_LINKCFG_2X10G_2X8G}}; /** * @brief Get the HW linkcfg enum from the mgmt config string. * * @param mgmt_str mgmt string value. * * @return Returns the HW linkcfg enum corresponding to clp_str. */ static ocs_hw_linkcfg_e ocs_hw_linkcfg_from_mgmt(const char *mgmt_str) { uint32_t i; for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) { if (ocs_strncmp(mgmt_linkcfg_map[i].mgmt_str, mgmt_str, ocs_strlen(mgmt_str)) == 0) { return mgmt_linkcfg_map[i].linkcfg; } } return OCS_HW_LINKCFG_NA; } /** * @brief Get the mgmt string value from the HW linkcfg enum. * * @param linkcfg HW linkcfg enum. * * @return Returns the mgmt string value corresponding to the given HW linkcfg. */ static const char * ocs_mgmt_from_hw_linkcfg(ocs_hw_linkcfg_e linkcfg) { uint32_t i; for (i = 0; i < ARRAY_SIZE(mgmt_linkcfg_map); i++) { if (mgmt_linkcfg_map[i].linkcfg == linkcfg) { return mgmt_linkcfg_map[i].mgmt_str; } } return OCS_CONFIG_LINKCFG_UNKNOWN; } /** * @brief Link configuration callback argument */ typedef struct ocs_mgmt_linkcfg_arg_s { ocs_sem_t semaphore; int32_t status; ocs_hw_linkcfg_e linkcfg; } ocs_mgmt_linkcfg_arg_t; /** * @brief Get linkcfg config value * * @param ocs Pointer to the ocs structure. * @param name Not used. 
* @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_linkcfg(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { const char *linkcfg_str = NULL; uint32_t value; ocs_hw_linkcfg_e linkcfg; ocs_hw_get(&ocs->hw, OCS_HW_LINKCFG, &value); linkcfg = (ocs_hw_linkcfg_e)value; linkcfg_str = ocs_mgmt_from_hw_linkcfg(linkcfg); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "linkcfg", linkcfg_str); } /** * @brief Get requested WWNN config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_req_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwnn", "0x%llx", (unsigned long long)xport->req_wwnn); } /** * @brief Get requested WWPN config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_req_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_xport_t *xport = ocs->xport; ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "requested_wwpn", "0x%llx", (unsigned long long)xport->req_wwpn); } /** * @brief Get requested nodedb_mask config value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param textbuf The textbuf to which the result is written. * * @return None. */ static void get_nodedb_mask(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_emit_int(textbuf, MGMT_MODE_RW, "nodedb_mask", "0x%08x", ocs->nodedb_mask); } /** * @brief Set requested WWNN value. * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the linkcfg is set. * * @return Returns 0 on success. 
*/ int set_req_wwnn(ocs_t *ocs, char *name, char *value) { int rc; uint64_t wwnn; if (ocs_strcasecmp(value, "default") == 0) { wwnn = 0; } else if (parse_wwn(value, &wwnn) != 0) { ocs_log_test(ocs, "Invalid WWNN: %s\n", value); return 1; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWNN_SET, wwnn); if(rc) { ocs_log_test(ocs, "OCS_XPORT_WWNN_SET failed: %d\n", rc); return rc; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_test(ocs, "port offline failed : %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_test(ocs, "port online failed : %d\n", rc); } return rc; } /** * @brief Set requested WWNP value. * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the linkcfg is set. * * @return Returns 0 on success. */ int set_req_wwpn(ocs_t *ocs, char *name, char *value) { int rc; uint64_t wwpn; if (ocs_strcasecmp(value, "default") == 0) { wwpn = 0; } else if (parse_wwn(value, &wwpn) != 0) { ocs_log_test(ocs, "Invalid WWPN: %s\n", value); return 1; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_WWPN_SET, wwpn); if(rc) { ocs_log_test(ocs, "OCS_XPORT_WWPN_SET failed: %d\n", rc); return rc; } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (rc) { ocs_log_test(ocs, "port offline failed : %d\n", rc); } rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (rc) { ocs_log_test(ocs, "port online failed : %d\n", rc); } return rc; } /** * @brief Set node debug mask value * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the nodedb_mask is set. * * @return Returns 0 on success. */ static int set_nodedb_mask(ocs_t *ocs, char *name, char *value) { ocs->nodedb_mask = ocs_strtoul(value, 0, 0); return 0; } /** * @brief Set linkcfg config value. * * @param ocs Pointer to the ocs structure. * @param name Not used. * @param value Value to which the linkcfg is set. * * @return Returns 0 on success. 
*/ static int set_linkcfg(ocs_t *ocs, char *name, char *value) { ocs_hw_linkcfg_e linkcfg; ocs_mgmt_linkcfg_arg_t cb_arg; ocs_hw_rtn_e status; ocs_sem_init(&cb_arg.semaphore, 0, "mgmt_linkcfg"); /* translate mgmt linkcfg string to HW linkcfg enum */ linkcfg = ocs_hw_linkcfg_from_mgmt(value); /* set HW linkcfg */ status = ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SET_LINK_CONFIG, (uintptr_t)linkcfg, ocs_mgmt_linkcfg_cb, &cb_arg); if (status) { ocs_log_test(ocs, "ocs_hw_set_linkcfg failed\n"); return -1; } if (ocs_sem_p(&cb_arg.semaphore, OCS_SEM_FOREVER)) { ocs_log_err(ocs, "ocs_sem_p failed\n"); return -1; } if (cb_arg.status) { ocs_log_test(ocs, "failed to set linkcfg from HW status=%d\n", cb_arg.status); return -1; } return 0; } /** * @brief Linkcfg callback * * @param status Result of the linkcfg get/set operation. * @param value Resulting linkcfg value. * @param arg Callback argument. * * @return None. */ static void ocs_mgmt_linkcfg_cb(int32_t status, uintptr_t value, void *arg) { ocs_mgmt_linkcfg_arg_t *cb_arg = (ocs_mgmt_linkcfg_arg_t *)arg; cb_arg->status = status; cb_arg->linkcfg = (ocs_hw_linkcfg_e)value; ocs_sem_v(&cb_arg->semaphore); } static int set_debug_mq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_MQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_MQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_debug_cq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_CQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_CQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_debug_wq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_WQ_DUMP); result = 0; } 
else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_WQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_debug_eq_dump(ocs_t *ocs, char *name, char *value) { int result; if (ocs_strcasecmp(value, "false") == 0) { ocs_debug_disable(OCS_DEBUG_ENABLE_EQ_DUMP); result = 0; } else if (ocs_strcasecmp(value, "true") == 0) { ocs_debug_enable(OCS_DEBUG_ENABLE_EQ_DUMP); result = 0; } else { result = -1; } return result; } static int set_logmask(ocs_t *ocs, char *name, char *value) { ocs->logmask = ocs_strtoul(value, NULL, 0); return 0; } static int set_loglevel(ocs_t *ocs, char *name, char *value) { loglevel = ocs_strtoul(value, NULL, 0); return 0; } int set_configured_speed(ocs_t *ocs, char *name, char *value) { int result = 0; ocs_hw_rtn_e hw_rc; int xport_rc; uint32_t spd; spd = ocs_strtoul(value, NULL, 0); if ((spd != 0) && (spd != 2000) && (spd != 4000) && (spd != 8000) && (spd != 16000) && (spd != 32000)) { ocs_log_test(ocs, "unsupported speed %d\n", spd); return 1; } ocs_log_debug(ocs, "Taking port offline\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Port offline failed\n"); result = 1; } else { ocs_log_debug(ocs, "Setting port to speed %d\n", spd); hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_LINK_SPEED, spd); if (hw_rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(ocs, "Speed set failed\n"); result = 1; } /* If we failed to set the speed we still want to try to bring * the port back online */ ocs_log_debug(ocs, "Bringing port online\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (xport_rc != 0) { result = 1; } } return result; } int set_configured_topology(ocs_t *ocs, char *name, char *value) { int result = 0; ocs_hw_rtn_e hw_rc; int xport_rc; uint32_t topo; topo = ocs_strtoul(value, NULL, 0); if (topo >= OCS_HW_TOPOLOGY_NONE) { return 1; } ocs_log_debug(ocs, "Taking port offline\n"); xport_rc = ocs_xport_control(ocs->xport, 
OCS_XPORT_PORT_OFFLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Port offline failed\n"); result = 1; } else { ocs_log_debug(ocs, "Setting port to topology %d\n", topo); hw_rc = ocs_hw_set(&ocs->hw, OCS_HW_TOPOLOGY, topo); if (hw_rc != OCS_HW_RTN_SUCCESS) { ocs_log_test(ocs, "Topology set failed\n"); result = 1; } /* If we failed to set the topology we still want to try to bring * the port back online */ ocs_log_debug(ocs, "Bringing port online\n"); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (xport_rc != 0) { result = 1; } } return result; } static int set_configured_link_state(ocs_t *ocs, char *name, char *value) { int result = 0; int xport_rc; if (ocs_strcasecmp(value, "offline") == 0) { ocs_log_debug(ocs, "Setting port to %s\n", value); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Setting port to offline failed\n"); result = -1; } } else if (ocs_strcasecmp(value, "online") == 0) { ocs_log_debug(ocs, "Setting port to %s\n", value); xport_rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE); if (xport_rc != 0) { ocs_log_test(ocs, "Setting port to online failed\n"); result = -1; } } else { ocs_log_test(ocs, "Unsupported link state \"%s\"\n", value); result = -1; } return result; } typedef struct ocs_mgmt_get_port_protocol_result { ocs_sem_t semaphore; int32_t status; ocs_hw_port_protocol_e port_protocol; } ocs_mgmt_get_port_protocol_result_t; static void ocs_mgmt_get_port_protocol_cb(int32_t status, ocs_hw_port_protocol_e port_protocol, void *arg) { ocs_mgmt_get_port_protocol_result_t *result = arg; result->status = status; result->port_protocol = port_protocol; ocs_sem_v(&(result->semaphore)); } static void get_port_protocol(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_get_port_protocol_result_t result; uint8_t bus; uint8_t dev; uint8_t func; ocs_sem_init(&(result.semaphore), 0, "get_port_protocol"); ocs_get_bus_dev_func(ocs, &bus, &dev, &func); 
if(ocs_hw_get_port_protocol(&ocs->hw, func, ocs_mgmt_get_port_protocol_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { switch (result.port_protocol) { case OCS_HW_PORT_PROTOCOL_ISCSI: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "iSCSI"); break; case OCS_HW_PORT_PROTOCOL_FCOE: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "FCoE"); break; case OCS_HW_PORT_PROTOCOL_FC: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "FC"); break; case OCS_HW_PORT_PROTOCOL_OTHER: ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "Other"); break; } } else { ocs_log_test(ocs, "getting port profile status 0x%x\n", result.status); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "port_protocol", "Unknown"); } } } typedef struct ocs_mgmt_set_port_protocol_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_port_protocol_result_t; static void ocs_mgmt_set_port_protocol_cb(int32_t status, void *arg) { ocs_mgmt_get_port_protocol_result_t *result = arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set port protocol * @par Description * This is a management action handler to set the current * port protocol. Input value should be one of iSCSI, * FC, or FCoE. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param value The value to be assigned * * @return Returns 0 on success, non-zero on failure. 
*/ static int32_t set_port_protocol(ocs_t *ocs, char *name, char *value) { ocs_mgmt_set_port_protocol_result_t result; int32_t rc = 0; ocs_hw_port_protocol_e new_protocol; uint8_t bus; uint8_t dev; uint8_t func; ocs_get_bus_dev_func(ocs, &bus, &dev, &func); ocs_sem_init(&(result.semaphore), 0, "set_port_protocol"); if (ocs_strcasecmp(value, "iscsi") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_ISCSI; } else if (ocs_strcasecmp(value, "fc") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_FC; } else if (ocs_strcasecmp(value, "fcoe") == 0) { new_protocol = OCS_HW_PORT_PROTOCOL_FCOE; } else { return -1; } rc = ocs_hw_set_port_protocol(&ocs->hw, new_protocol, func, ocs_mgmt_set_port_protocol_cb, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status == 0) { /* Success. */ rc = 0; } else { rc = -1; ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status); } } return rc; } typedef struct ocs_mgmt_get_profile_list_result_s { ocs_sem_t semaphore; int32_t status; ocs_hw_profile_list_t *list; } ocs_mgmt_get_profile_list_result_t; static void ocs_mgmt_get_profile_list_cb(int32_t status, ocs_hw_profile_list_t *list, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; result->list = list; ocs_sem_v(&(result->semaphore)); } /** * @brief Get list of profiles * @par Description * This is a management action handler to get the list of * profiles supported by the SLI port. Although the spec says * that all SLI platforms support this, only Skyhawk actually * has a useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. 
* * @return none */ static void get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { ocs_mgmt_get_profile_list_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_profile_list"); if(ocs_hw_get_profile_list(&ocs->hw, ocs_mgmt_get_profile_list_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { /* Success. */ #define MAX_LINE_SIZE 520 #define BUFFER_SIZE MAX_LINE_SIZE*40 char *result_buf; char result_line[MAX_LINE_SIZE]; uint32_t bytes_left; uint32_t i; result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO); bytes_left = BUFFER_SIZE; for (i=0; inum_descriptors; i++) { sprintf(result_line, "0x%02x:%s\n", result.list->descriptors[i].profile_id, result.list->descriptors[i].profile_description); if (strlen(result_line) < bytes_left) { strcat(result_buf, result_line); bytes_left -= strlen(result_line); } } ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "profile_list", result_buf); ocs_free(ocs, result_buf, BUFFER_SIZE); ocs_free(ocs, result.list, sizeof(ocs_hw_profile_list_t)); } else { ocs_log_test(ocs, "getting profile list status 0x%x\n", result.status); } } } typedef struct ocs_mgmt_get_active_profile_result { ocs_sem_t semaphore; int32_t status; uint32_t active_profile_id; } ocs_mgmt_get_active_profile_result_t; static void ocs_mgmt_get_active_profile_cb(int32_t status, uint32_t active_profile, void *ul_arg) { ocs_mgmt_get_active_profile_result_t *result = ul_arg; result->status = status; result->active_profile_id = active_profile; ocs_sem_v(&(result->semaphore)); } #define MAX_PROFILE_LENGTH 5 /** * @brief Get active profile * @par Description * This is a management action handler to get the currently * active profile for an SLI port. Although the spec says that * all SLI platforms support this, only Skyhawk actually has a * useful implementation. * * @param ocs Pointer to the ocs structure. 
* @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_active_profile(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[MAX_PROFILE_LENGTH]; ocs_mgmt_get_active_profile_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_active_profile"); if(ocs_hw_get_active_profile(&ocs->hw, ocs_mgmt_get_active_profile_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); } if (result.status == 0) { /* Success. */ sprintf(result_string, "0x%02x", result.active_profile_id); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "active_profile", result_string); } else { ocs_log_test(ocs, "getting active profile status 0x%x\n", result.status); } } } typedef struct ocs_mgmt_set_active_profile_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_active_profile_result_t; static void ocs_mgmt_set_active_profile_cb(int32_t status, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set active profile * @par Description * This is a management action handler to set the currently * active profile for an SLI port. Although the spec says that * all SLI platforms support this, only Skyhawk actually has a * useful implementation. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param value Requested new value of the property. * * @return Returns 0 on success, non-zero on failure. 
*/ static int32_t set_active_profile(ocs_t *ocs, char *name, char *value) { ocs_mgmt_set_active_profile_result_t result; int32_t rc = 0; int32_t new_profile; new_profile = ocs_strtoul(value, NULL, 0); ocs_sem_init(&(result.semaphore), 0, "set_active_profile"); rc = ocs_hw_set_active_profile(&ocs->hw, ocs_mgmt_set_active_profile_cb, new_profile, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status == 0) { /* Success. */ rc = 0; } else { rc = -1; ocs_log_test(ocs, "setting active profile status 0x%x\n", result.status); } } return rc; } typedef struct ocs_mgmt_get_nvparms_result { ocs_sem_t semaphore; int32_t status; uint8_t wwpn[8]; uint8_t wwnn[8]; uint8_t hard_alpa; uint32_t preferred_d_id; } ocs_mgmt_get_nvparms_result_t; static void ocs_mgmt_get_nvparms_cb(int32_t status, uint8_t *wwpn, uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void *ul_arg) { ocs_mgmt_get_nvparms_result_t *result = ul_arg; result->status = status; ocs_memcpy(result->wwpn, wwpn, sizeof(result->wwpn)); ocs_memcpy(result->wwnn, wwnn, sizeof(result->wwnn)); result->hard_alpa = hard_alpa; result->preferred_d_id = preferred_d_id; ocs_sem_v(&(result->semaphore)); } /** * @brief Get wwpn * @par Description * * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. 
* * @return none */ static void get_nv_wwpn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[24]; ocs_mgmt_get_nvparms_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_nv_wwpn"); if(ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return; } if (result.status == 0) { /* Success. Copy wwpn from result struct to result string */ sprintf(result_string, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", result.wwpn[0], result.wwpn[1], result.wwpn[2], result.wwpn[3], result.wwpn[4], result.wwpn[5], result.wwpn[6], result.wwpn[7]); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwpn", result_string); } else { ocs_log_test(ocs, "getting wwpn status 0x%x\n", result.status); } } } /** * @brief Get wwnn * @par Description * * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return none */ static void get_nv_wwnn(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { char result_string[24]; ocs_mgmt_get_nvparms_result_t result; ocs_sem_init(&(result.semaphore), 0, "get_nv_wwnn"); if(ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result) == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return; } if (result.status == 0) { /* Success. 
Copy wwnn from result struct to result string */ ocs_snprintf(result_string, sizeof(result_string), "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", result.wwnn[0], result.wwnn[1], result.wwnn[2], result.wwnn[3], result.wwnn[4], result.wwnn[5], result.wwnn[6], result.wwnn[7]); ocs_mgmt_emit_string(textbuf, MGMT_MODE_RW, "nv_wwnn", result_string); } else { ocs_log_test(ocs, "getting wwnn status 0x%x\n", result.status); } } } /** * @brief Get accumulated node abort counts * @par Description Get the sum of all nodes abort count. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param textbuf Pointer to an ocs_textbuf, which is used to return the results. * * @return None. */ static void get_node_abort_cnt(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf) { uint32_t abort_counts = 0; ocs_domain_t *domain; ocs_sport_t *sport; ocs_node_t *node; if (ocs_device_lock_try(ocs) != TRUE) { /* Didn't get the lock */ return; } /* Here the Device lock is held */ ocs_list_foreach(&ocs->domain_list, domain) { if (ocs_domain_lock_try(domain) != TRUE) { /* Didn't get the lock */ ocs_device_unlock(ocs); return; } /* Here the Domain lock is held */ ocs_list_foreach(&domain->sport_list, sport) { if (ocs_sport_lock_try(sport) != TRUE) { /* Didn't get the lock */ ocs_domain_unlock(domain); ocs_device_unlock(ocs); return; } /* Here the sport lock is held */ ocs_list_foreach(&sport->node_list, node) { abort_counts += node->abort_cnt; } ocs_sport_unlock(sport); } ocs_domain_unlock(domain); } ocs_device_unlock(ocs); ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "node_abort_cnt", "%d" , abort_counts); } typedef struct ocs_mgmt_set_nvparms_result { ocs_sem_t semaphore; int32_t status; } ocs_mgmt_set_nvparms_result_t; static void ocs_mgmt_set_nvparms_cb(int32_t status, void *ul_arg) { ocs_mgmt_get_profile_list_result_t *result = ul_arg; result->status = status; ocs_sem_v(&(result->semaphore)); } /** * @brief Set wwn * @par Description Sets the Non-volatile 
worldwide names, * if provided. * * @param ocs Pointer to the ocs structure. * @param name Name of the action being performed. * @param wwn_p Requested new WWN values. * * @return Returns 0 on success, non-zero on failure. */ static int32_t set_nv_wwn(ocs_t *ocs, char *name, char *wwn_p) { ocs_mgmt_get_nvparms_result_t result; uint8_t new_wwpn[8]; uint8_t new_wwnn[8]; char *wwpn_p = NULL; char *wwnn_p = NULL; int32_t rc = -1; int wwpn = 0; int wwnn = 0; int i; /* This is a read-modify-write operation, so first we have to read * the current values */ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn1"); rc = ocs_hw_get_nvparms(&ocs->hw, ocs_mgmt_get_nvparms_cb, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status != 0) { ocs_log_test(ocs, "getting nvparms status 0x%x\n", result.status); return -1; } } /* wwn_p contains wwpn_p@wwnn_p values */ if (wwn_p != NULL) { wwpn_p = ocs_strsep(&wwn_p, "@"); wwnn_p = wwn_p; } if (wwpn_p != NULL) { wwpn = ocs_strcmp(wwpn_p, "NA"); } if (wwnn_p != NULL) { wwnn = ocs_strcmp(wwnn_p, "NA"); } /* Parse the new WWPN */ if ((wwpn_p != NULL) && (wwpn != 0)) { if (ocs_sscanf(wwpn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &(new_wwpn[0]), &(new_wwpn[1]), &(new_wwpn[2]), &(new_wwpn[3]), &(new_wwpn[4]), &(new_wwpn[5]), &(new_wwpn[6]), &(new_wwpn[7])) != 8) { ocs_log_test(ocs, "can't parse WWPN %s\n", wwpn_p); return -1; } } /* Parse the new WWNN */ if ((wwnn_p != NULL) && (wwnn != 0 )) { if (ocs_sscanf(wwnn_p, "%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &(new_wwnn[0]), &(new_wwnn[1]), &(new_wwnn[2]), &(new_wwnn[3]), &(new_wwnn[4]), &(new_wwnn[5]), &(new_wwnn[6]), &(new_wwnn[7])) != 8) { ocs_log_test(ocs, "can't parse WWNN %s\n", wwnn_p); return -1; } } for (i = 0; i < 8; i++) { /* Use active wwpn, if new one is not provided */ if (wwpn == 0) { new_wwpn[i] = result.wwpn[i]; } /* 
Use active wwnn, if new one is not provided */ if (wwnn == 0) { new_wwnn[i] = result.wwnn[i]; } } /* Modify the nv_wwnn and nv_wwpn, then write it back */ ocs_sem_init(&(result.semaphore), 0, "set_nv_wwn2"); rc = ocs_hw_set_nvparms(&ocs->hw, ocs_mgmt_set_nvparms_cb, new_wwpn, new_wwnn, result.hard_alpa, result.preferred_d_id, &result); if (rc == OCS_HW_RTN_SUCCESS) { if (ocs_sem_p(&(result.semaphore), OCS_SEM_FOREVER) != 0) { /* Undefined failure */ ocs_log_err(ocs, "ocs_sem_p failed\n"); return -ENXIO; } if (result.status != 0) { ocs_log_test(ocs, "setting wwn status 0x%x\n", result.status); return -1; } } return rc; } static int set_tgt_rscn_delay(ocs_t *ocs, char *name, char *value) { ocs->tgt_rscn_delay_msec = ocs_strtoul(value, NULL, 0) * 1000; ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_tgt_rscn_period(ocs_t *ocs, char *name, char *value) { ocs->tgt_rscn_period_msec = ocs_strtoul(value, NULL, 0) * 1000; ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_cmd(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_CMD); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_free_drop_cmd(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_FREE_DROPPED); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_data(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? NO_ERR_INJECT : INJECT_DROP_DATA); ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value); return 0; } static int set_inject_drop_resp(ocs_t *ocs, char *name, char *value) { ocs->err_injection = (ocs_strtoul(value, NULL, 0) == 0 ? 
	    NO_ERR_INJECT : INJECT_DROP_RESP);
	ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
	return 0;
}

/* mgmt setter: command-error injection count (0 disables). */
static int
set_cmd_err_inject(ocs_t *ocs, char *name, char *value)
{
	ocs->cmd_err_inject = ocs_strtoul(value, NULL, 0);
	ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
	return 0;
}

/* mgmt setter: command delay in msec; a non-zero value also arms the
 * INJECT_DELAY_CMD error-injection mode, zero disarms it. */
static int
set_cmd_delay_value(ocs_t *ocs, char *name, char *value)
{
	ocs->delay_value_msec = ocs_strtoul(value, NULL, 0);
	ocs->err_injection = (ocs->delay_value_msec == 0 ? NO_ERR_INJECT : INJECT_DELAY_CMD);
	ocs_log_debug(ocs, "mgmt set: %s %s\n", name, value);
	return 0;
}

/**
 * @brief parse a WWN from a string into a 64-bit value
 *
 * Given a pointer to a string, parse the string into a 64-bit
 * WWN value. The format of the string must be xx:xx:xx:xx:xx:xx:xx:xx
 *
 * NOTE: despite the comment above, the scan format actually expects a
 * "0x" prefix followed by 16 contiguous hex digits (no colons) -- see
 * the ocs_sscanf() format string below.  Byte 0 is the most significant
 * byte of the result.
 *
 * @param wwn_in pointer to the string to be parsed
 * @param wwn_out pointer to uint64_t in which to put the parsed result
 *
 * @return 0 if successful, non-zero if the WWN is malformed and couldn't be parsed
 */
int
parse_wwn(char *wwn_in, uint64_t *wwn_out)
{
	uint8_t byte0;
	uint8_t byte1;
	uint8_t byte2;
	uint8_t byte3;
	uint8_t byte4;
	uint8_t byte5;
	uint8_t byte6;
	uint8_t byte7;
	int rc;

	rc = ocs_sscanf(wwn_in, "0x%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
			&byte0, &byte1, &byte2, &byte3,
			&byte4, &byte5, &byte6, &byte7);

	if (rc == 8) {
		/* All eight bytes converted: assemble big-endian. */
		*wwn_out = ((uint64_t)byte0 << 56) |
			   ((uint64_t)byte1 << 48) |
			   ((uint64_t)byte2 << 40) |
			   ((uint64_t)byte3 << 32) |
			   ((uint64_t)byte4 << 24) |
			   ((uint64_t)byte5 << 16) |
			   ((uint64_t)byte6 <<  8) |
			   ((uint64_t)byte7);
		return 0;
	} else {
		/* Malformed input: returns 1 (not a negative errno). */
		return 1;
	}
}

/* Forward declaration: renders a MGMT_MODE_* bitmask as "rwx"-style text. */
static char *mode_string(int mode);

/**
 * @ingroup mgmt
 * @brief Generate the beginning of a numbered section in a management XML document.
 *
 * @par Description
 * This function begins a section. The XML information is appended to
 * the textbuf. This form of the function is used for sections that might have
 * multiple instances, such as a node or a SLI Port (sport). The index number
 * is appended to the name.
* * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * @param index Index number of this instance of the section. * * @return None. */ extern void ocs_mgmt_start_section(ocs_textbuf_t *textbuf, const char *name, int index) { ocs_textbuf_printf(textbuf, "<%s instance=\"%d\">\n", name, index); } /** * @ingroup mgmt * @brief Generate the beginning of an unnumbered section in a management XML document. * * @par Description * This function begins a section. The XML information is appended to * the textbuf. This form of the function is used for sections that have * a single instance only. Therefore, no index number is needed. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * * @return None. */ extern void ocs_mgmt_start_unnumbered_section(ocs_textbuf_t *textbuf, const char *name) { ocs_textbuf_printf(textbuf, "<%s>\n", name); } /** * @ingroup mgmt * @brief Generate the end of a section in a management XML document. * * @par Description * This function ends a section. The XML information is appended to * the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * * @return None. */ void ocs_mgmt_end_unnumbered_section(ocs_textbuf_t *textbuf, const char *name) { ocs_textbuf_printf(textbuf, "\n", name); } /** * @ingroup mgmt * @brief Generate the indexed end of a section in a management XML document. * * @par Description * This function ends a section. The XML information is appended to * the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param name Name of the section. * @param index Index number of this instance of the section. * * @return None. */ void ocs_mgmt_end_section(ocs_textbuf_t *textbuf, const char *name, int index) { ocs_textbuf_printf(textbuf, "\n", name); } /** * @ingroup mgmt * @brief Generate a property, with no value, in a management XML document. 
* * @par Description * This function generates a property name. The XML information is appended to * the textbuf. This form of the function is used by the list functions * when the property name only (and not the current value) is given. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * * @return None. */ void ocs_mgmt_emit_property_name(ocs_textbuf_t *textbuf, int mode, const char *name) { ocs_textbuf_printf(textbuf, "<%s mode=\"%s\"/>\n", name, mode_string(mode)); } /** * @ingroup mgmt * @brief Generate a property with a string value in a management XML document. * * @par Description * This function generates a property name and a string value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param value Value of the property. * * @return None. */ void ocs_mgmt_emit_string(ocs_textbuf_t *textbuf, int mode, const char *name, const char *value) { ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), value, name); } /** * @ingroup mgmt * @brief Generate a property with an integer value in a management XML document. * * @par Description * This function generates a property name and an integer value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param fmt A printf format for formatting the integer value. * * @return none */ void ocs_mgmt_emit_int(ocs_textbuf_t *textbuf, int mode, const char *name, const char *fmt, ...) 
{ va_list ap; char valuebuf[64]; va_start(ap, fmt); ocs_vsnprintf(valuebuf, sizeof(valuebuf), fmt, ap); va_end(ap); ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), valuebuf, name); } /** * @ingroup mgmt * @brief Generate a property with a boolean value in a management XML document. * * @par Description * This function generates a property name and a boolean value. * The XML information is appended to the textbuf. * * @param textbuf Pointer to the driver dump text buffer. * @param mode Defines whether the property is read(r)/write(w)/executable(x). * @param name Name of the property. * @param value Boolean value to be added to the textbuf. * * @return None. */ void ocs_mgmt_emit_boolean(ocs_textbuf_t *textbuf, int mode, const char *name, int value) { char *valuebuf = value ? "true" : "false"; ocs_textbuf_printf(textbuf, "<%s mode=\"%s\">%s\n", name, mode_string(mode), valuebuf, name); } static char *mode_string(int mode) { static char mode_str[4]; mode_str[0] = '\0'; if (mode & MGMT_MODE_RD) { strcat(mode_str, "r"); } if (mode & MGMT_MODE_WR) { strcat(mode_str, "w"); } if (mode & MGMT_MODE_EX) { strcat(mode_str, "x"); } return mode_str; } Index: head/sys/dev/ocs_fc/ocs_node.c =================================================================== --- head/sys/dev/ocs_fc/ocs_node.c (revision 359440) +++ head/sys/dev/ocs_fc/ocs_node.c (revision 359441) @@ -1,2376 +1,2376 @@ /*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * OCS driver remote node handler. This file contains code that is shared * between fabric (ocs_fabric.c) and device (ocs_device.c) nodes. */ /*! * @defgroup node_common Node common support * @defgroup node_alloc Node allocation */ #include "ocs.h" #include "ocs_els.h" #include "ocs_device.h" #define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]" #define SCSI_ITT_SIZE(ocs) ((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8) #define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag #define scsi_io_printf(io, fmt, ...) 
ocs_log_debug(io->ocs, "[%s]" SCSI_IOFMT fmt, \ io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__) void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *node); void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *node); int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *node); int ocs_mgmt_node_set(char *parent, char *name, char *value, void *node); int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length, void *arg_out, uint32_t arg_out_length, void *node); static ocs_mgmt_functions_t node_mgmt_functions = { .get_list_handler = ocs_mgmt_node_list, .get_handler = ocs_mgmt_node_get, .get_all_handler = ocs_mgmt_node_get_all, .set_handler = ocs_mgmt_node_set, .exec_handler = ocs_mgmt_node_exec, }; /** * @ingroup node_common * @brief Device node state machine wait for all ELS's to * complete * * Abort all ELS's for given node. * * @param node node for which ELS's will be aborted */ void ocs_node_abort_all_els(ocs_node_t *node) { ocs_io_t *els; ocs_io_t *els_next; ocs_node_cb_t cbdata = {0}; ocs_node_hold_frames(node); ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) { ocs_log_debug(node->ocs, "[%s] initiate ELS abort %s\n", node->display_name, els->display_name); ocs_unlock(&node->active_ios_lock); cbdata.els = els; ocs_els_post_event(els, OCS_EVT_ABORT_ELS, &cbdata); ocs_lock(&node->active_ios_lock); } ocs_unlock(&node->active_ios_lock); } /** * @ingroup node_common * @brief Handle remote node events from HW * * Handle remote node events from HW. Essentially the HW event is translated into * a node state machine event that is posted to the affected node. * * @param arg pointer to ocs * @param event HW event to proceoss * @param data application specific data (pointer to the affected node) * * @return returns 0 for success, a negative error code value for failure. 
*/ int32_t ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data) { ocs_t *ocs = arg; ocs_sm_event_t sm_event = OCS_EVT_LAST; ocs_remote_node_t *rnode = data; ocs_node_t *node = rnode->node; switch (event) { case OCS_HW_NODE_ATTACH_OK: sm_event = OCS_EVT_NODE_ATTACH_OK; break; case OCS_HW_NODE_ATTACH_FAIL: sm_event = OCS_EVT_NODE_ATTACH_FAIL; break; case OCS_HW_NODE_FREE_OK: sm_event = OCS_EVT_NODE_FREE_OK; break; case OCS_HW_NODE_FREE_FAIL: sm_event = OCS_EVT_NODE_FREE_FAIL; break; default: ocs_log_test(ocs, "unhandled event %#x\n", event); return -1; } /* If we're using HLM, forward the NODE_ATTACH_OK/FAIL event to all nodes in the node group */ if ((node->node_group != NULL) && ((sm_event == OCS_EVT_NODE_ATTACH_OK) || (sm_event == OCS_EVT_NODE_ATTACH_FAIL))) { ocs_node_t *n = NULL; uint8_t attach_ok = sm_event == OCS_EVT_NODE_ATTACH_OK; ocs_sport_lock(node->sport); { ocs_list_foreach(&node->sport->node_list, n) { if (node == n) { continue; } ocs_node_lock(n); if ((!n->rnode.attached) && (node->node_group == n->node_group)) { n->rnode.attached = attach_ok; node_printf(n, "rpi[%d] deferred HLM node attach %s posted\n", n->rnode.index, attach_ok ? 
"ok" : "fail"); ocs_node_post_event(n, sm_event, NULL); } ocs_node_unlock(n); } } ocs_sport_unlock(node->sport); } ocs_node_post_event(node, sm_event, NULL); return 0; } /** * @ingroup node_alloc * @brief Find an FC node structure given the FC port ID * * @param sport the SPORT to search * @param port_id FC port ID * * @return pointer to the object or NULL if not found */ ocs_node_t * ocs_node_find(ocs_sport_t *sport, uint32_t port_id) { ocs_node_t *node; ocs_assert(sport->lookup, NULL); ocs_sport_lock(sport); node = spv_get(sport->lookup, port_id); ocs_sport_unlock(sport); return node; } /** * @ingroup node_alloc * @brief Find an FC node structure given the WWPN * * @param sport the SPORT to search * @param wwpn the WWPN to search for (host endian) * * @return pointer to the object or NULL if not found */ ocs_node_t * ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn) { - ocs_node_t *node = NULL;; + ocs_node_t *node = NULL; ocs_assert(sport, NULL); ocs_sport_lock(sport); ocs_list_foreach(&sport->node_list, node) { if (ocs_node_get_wwpn(node) == wwpn) { ocs_sport_unlock(sport); return node; } } ocs_sport_unlock(sport); return NULL; } /** * @ingroup node_alloc * @brief allocate node object pool * * A pool of ocs_node_t objects is allocated. * * @param ocs pointer to driver instance context * @param node_count count of nodes to allocate * * @return returns 0 for success, a negative error code value for failure. 
*/ int32_t ocs_node_create_pool(ocs_t *ocs, uint32_t node_count) { ocs_xport_t *xport = ocs->xport; uint32_t i; ocs_node_t *node; uint32_t max_sge; uint32_t num_sgl; uint64_t max_xfer_size; int32_t rc; xport->nodes_count = node_count; xport->nodes = ocs_malloc(ocs, node_count * sizeof(ocs_node_t *), OCS_M_ZERO | OCS_M_NOWAIT); if (xport->nodes == NULL) { ocs_log_err(ocs, "node ptrs allocation failed"); return -1; } if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) && 0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) { max_xfer_size = (max_sge * (uint64_t)num_sgl); } else { max_xfer_size = 65536; } if (max_xfer_size > 65536) max_xfer_size = 65536; ocs_list_init(&xport->nodes_free_list, ocs_node_t, link); for (i = 0; i < node_count; i ++) { node = ocs_malloc(ocs, sizeof(ocs_node_t), OCS_M_ZERO | OCS_M_NOWAIT); if (node == NULL) { ocs_log_err(ocs, "node allocation failed"); goto error; } /* Assign any persistent field values */ node->instance_index = i; node->max_wr_xfer_size = max_xfer_size; node->rnode.indicator = UINT32_MAX; rc = ocs_dma_alloc(ocs, &node->sparm_dma_buf, 256, 16); if (rc) { ocs_free(ocs, node, sizeof(ocs_node_t)); ocs_log_err(ocs, "ocs_dma_alloc failed: %d\n", rc); goto error; } xport->nodes[i] = node; ocs_list_add_tail(&xport->nodes_free_list, node); } return 0; error: ocs_node_free_pool(ocs); return -1; } /** * @ingroup node_alloc * @brief free node object pool * * The pool of previously allocated node objects is freed * * @param ocs pointer to driver instance context * * @return none */ void ocs_node_free_pool(ocs_t *ocs) { ocs_xport_t *xport = ocs->xport; ocs_node_t *node; uint32_t i; if (!xport->nodes) return; ocs_device_lock(ocs); for (i = 0; i < xport->nodes_count; i ++) { node = xport->nodes[i]; if (node) { /* free sparam_dma_buf */ ocs_dma_free(ocs, &node->sparm_dma_buf); ocs_free(ocs, node, sizeof(ocs_node_t)); } xport->nodes[i] = NULL; } ocs_free(ocs, xport->nodes, (xport->nodes_count * sizeof(ocs_node_t *))); 
ocs_device_unlock(ocs); } /** * @ingroup node_alloc * @brief return pointer to node object given instance index * * A pointer to the node object given by an instance index is returned. * * @param ocs pointer to driver instance context * @param index instance index * * @return returns pointer to node object, or NULL */ ocs_node_t * ocs_node_get_instance(ocs_t *ocs, uint32_t index) { ocs_xport_t *xport = ocs->xport; ocs_node_t *node = NULL; if (index >= (xport->nodes_count)) { ocs_log_test(ocs, "invalid index: %d\n", index); return NULL; } node = xport->nodes[index]; return node->attached ? node : NULL; } /** * @ingroup node_alloc * @brief Allocate an fc node structure and add to node list * * @param sport pointer to the SPORT from which this node is allocated * @param port_id FC port ID of new node * @param init Port is an inititiator (sent a plogi) * @param targ Port is potentially a target * * @return pointer to the object or NULL if none available */ ocs_node_t * ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ) { int32_t rc; ocs_node_t *node = NULL; uint32_t instance_index; uint32_t max_wr_xfer_size; ocs_t *ocs = sport->ocs; ocs_xport_t *xport = ocs->xport; ocs_dma_t sparm_dma_buf; ocs_assert(sport, NULL); if (sport->shutting_down) { ocs_log_debug(ocs, "node allocation when shutting down %06x", port_id); return NULL; } ocs_device_lock(ocs); node = ocs_list_remove_head(&xport->nodes_free_list); ocs_device_unlock(ocs); if (node == NULL) { ocs_log_err(ocs, "node allocation failed %06x", port_id); return NULL; } /* Save persistent values across memset zero */ instance_index = node->instance_index; max_wr_xfer_size = node->max_wr_xfer_size; sparm_dma_buf = node->sparm_dma_buf; ocs_memset(node, 0, sizeof(*node)); node->instance_index = instance_index; node->max_wr_xfer_size = max_wr_xfer_size; node->sparm_dma_buf = sparm_dma_buf; node->rnode.indicator = UINT32_MAX; node->sport = sport; ocs_sport_lock(sport); node->ocs = ocs; node->init = 
init; node->targ = targ; rc = ocs_hw_node_alloc(&ocs->hw, &node->rnode, port_id, sport); if (rc) { ocs_log_err(ocs, "ocs_hw_node_alloc failed: %d\n", rc); ocs_sport_unlock(sport); /* Return back to pool. */ ocs_device_lock(ocs); ocs_list_add_tail(&xport->nodes_free_list, node); ocs_device_unlock(ocs); return NULL; } ocs_list_add_tail(&sport->node_list, node); ocs_node_lock_init(node); ocs_lock_init(ocs, &node->pend_frames_lock, "pend_frames_lock[%d]", node->instance_index); ocs_list_init(&node->pend_frames, ocs_hw_sequence_t, link); ocs_lock_init(ocs, &node->active_ios_lock, "active_ios[%d]", node->instance_index); ocs_list_init(&node->active_ios, ocs_io_t, link); ocs_list_init(&node->els_io_pend_list, ocs_io_t, link); ocs_list_init(&node->els_io_active_list, ocs_io_t, link); ocs_scsi_io_alloc_enable(node); /* zero the service parameters */ ocs_memset(node->sparm_dma_buf.virt, 0, node->sparm_dma_buf.size); node->rnode.node = node; node->sm.app = node; node->evtdepth = 0; ocs_node_update_display_name(node); spv_set(sport->lookup, port_id, node); ocs_sport_unlock(sport); node->mgmt_functions = &node_mgmt_functions; return node; } /** * @ingroup node_alloc * @brief free a node structure * * The node structure given by 'node' is free'd * * @param node the node to free * * @return returns 0 for success, a negative error code value for failure. */ int32_t ocs_node_free(ocs_node_t *node) { ocs_sport_t *sport; ocs_t *ocs; ocs_xport_t *xport; ocs_hw_rtn_e rc = 0; ocs_node_t *ns = NULL; int post_all_free = FALSE; ocs_assert(node, -1); ocs_assert(node->sport, -1); ocs_assert(node->ocs, -1); sport = node->sport; ocs_assert(sport, -1); ocs = node->ocs; ocs_assert(ocs->xport, -1); xport = ocs->xport; node_printf(node, "Free'd\n"); if(node->refound) { /* * Save the name server node. 
We will send fake RSCN event at * the end to handle ignored RSCN event during node deletion */ ns = ocs_node_find(node->sport, FC_ADDR_NAMESERVER); } /* Remove from node list */ ocs_sport_lock(sport); ocs_list_remove(&sport->node_list, node); /* Free HW resources */ if (OCS_HW_RTN_IS_ERROR((rc = ocs_hw_node_free_resources(&ocs->hw, &node->rnode)))) { ocs_log_test(ocs, "ocs_hw_node_free failed: %d\n", rc); rc = -1; } /* if the gidpt_delay_timer is still running, then delete it */ if (ocs_timer_pending(&node->gidpt_delay_timer)) { ocs_del_timer(&node->gidpt_delay_timer); } if (node->fcp2device) { ocs_del_crn(node); } /* remove entry from sparse vector list */ if (sport->lookup == NULL) { ocs_log_test(node->ocs, "assertion failed: sport lookup is NULL\n"); ocs_sport_unlock(sport); return -1; } spv_set(sport->lookup, node->rnode.fc_id, NULL); /* * If the node_list is empty, then post a ALL_CHILD_NODES_FREE event to the sport, * after the lock is released. The sport may be free'd as a result of the event. */ if (ocs_list_empty(&sport->node_list)) { post_all_free = TRUE; } ocs_sport_unlock(sport); if (post_all_free) { ocs_sm_post_event(&sport->sm, OCS_EVT_ALL_CHILD_NODES_FREE, NULL); } node->sport = NULL; node->sm.current_state = NULL; ocs_node_lock_free(node); ocs_lock_free(&node->pend_frames_lock); ocs_lock_free(&node->active_ios_lock); /* return to free list */ ocs_device_lock(ocs); ocs_list_add_tail(&xport->nodes_free_list, node); ocs_device_unlock(ocs); if(ns != NULL) { /* sending fake RSCN event to name server node */ ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL); } return rc; } /** * @brief free memory resources of a node object * * The node object's child objects are freed after which the * node object is freed. 
* * @param node pointer to a node object * * @return none */ void ocs_node_force_free(ocs_node_t *node) { ocs_io_t *io; ocs_io_t *next; ocs_io_t *els; ocs_io_t *els_next; /* shutdown sm processing */ ocs_sm_disable(&node->sm); ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name)); ocs_strncpy(node->current_state_name, "disabled", sizeof(node->current_state_name)); /* Let the backend cleanup if needed */ ocs_scsi_notify_node_force_free(node); ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->active_ios, io, next) { ocs_list_remove(&io->node->active_ios, io); ocs_io_free(node->ocs, io); } ocs_unlock(&node->active_ios_lock); /* free all pending ELS IOs */ ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) { /* can't call ocs_els_io_free() because lock is held; cleanup manually */ ocs_list_remove(&node->els_io_pend_list, els); ocs_io_free(node->ocs, els); } ocs_unlock(&node->active_ios_lock); /* free all active ELS IOs */ ocs_lock(&node->active_ios_lock); ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) { /* can't call ocs_els_io_free() because lock is held; cleanup manually */ ocs_list_remove(&node->els_io_active_list, els); ocs_io_free(node->ocs, els); } ocs_unlock(&node->active_ios_lock); /* manually purge pending frames (if any) */ ocs_node_purge_pending(node); ocs_node_free(node); } /** * @ingroup node_common * @brief Perform HW call to attach a remote node * * @param node pointer to node object * * @return 0 on success, non-zero otherwise */ int32_t ocs_node_attach(ocs_node_t *node) { int32_t rc = 0; ocs_sport_t *sport = node->sport; ocs_domain_t *domain = sport->domain; ocs_t *ocs = node->ocs; if (!domain->attached) { ocs_log_test(ocs, "Warning: ocs_node_attach with unattached domain\n"); return -1; } /* Update node->wwpn/wwnn */ ocs_node_build_eui_name(node->wwpn, sizeof(node->wwpn), ocs_node_get_wwpn(node)); ocs_node_build_eui_name(node->wwnn, 
sizeof(node->wwnn), ocs_node_get_wwnn(node)); if (ocs->enable_hlm) { ocs_node_group_init(node); } ocs_dma_copy_in(&node->sparm_dma_buf, node->service_params+4, sizeof(node->service_params)-4); /* take lock to protect node->rnode.attached */ ocs_node_lock(node); rc = ocs_hw_node_attach(&ocs->hw, &node->rnode, &node->sparm_dma_buf); if (OCS_HW_RTN_IS_ERROR(rc)) { ocs_log_test(ocs, "ocs_hw_node_attach failed: %d\n", rc); } ocs_node_unlock(node); return rc; } /** * @ingroup node_common * @brief Generate text for a node's fc_id * * The text for a nodes fc_id is generated, either as a well known name, or a 6 digit * hex value. * * @param fc_id fc_id * @param buffer text buffer * @param buffer_length text buffer length in bytes * * @return none */ void ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length) { switch (fc_id) { case FC_ADDR_FABRIC: ocs_snprintf(buffer, buffer_length, "fabric"); break; case FC_ADDR_CONTROLLER: ocs_snprintf(buffer, buffer_length, "fabctl"); break; case FC_ADDR_NAMESERVER: ocs_snprintf(buffer, buffer_length, "nserve"); break; default: if (FC_ADDR_IS_DOMAIN_CTRL(fc_id)) { ocs_snprintf(buffer, buffer_length, "dctl%02x", FC_ADDR_GET_DOMAIN_CTRL(fc_id)); } else { ocs_snprintf(buffer, buffer_length, "%06x", fc_id); } break; } } /** * @brief update the node's display name * * The node's display name is updated, sometimes needed because the sport part * is updated after the node is allocated. * * @param node pointer to the node object * * @return none */ void ocs_node_update_display_name(ocs_node_t *node) { uint32_t port_id = node->rnode.fc_id; ocs_sport_t *sport = node->sport; char portid_display[16]; ocs_assert(sport); ocs_node_fcid_display(port_id, portid_display, sizeof(portid_display)); ocs_snprintf(node->display_name, sizeof(node->display_name), "%s.%s", sport->display_name, portid_display); } /** * @brief cleans up an XRI for the pending link services accept by aborting the * XRI if required. * *

 * @par Description

* This function is called when the LS accept is not sent. * * @param node Node for which should be cleaned up */ void ocs_node_send_ls_io_cleanup(ocs_node_t *node) { ocs_t *ocs = node->ocs; if (node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) { ocs_assert(node->ls_acc_io); ocs_log_debug(ocs, "[%s] cleaning up LS_ACC oxid=0x%x\n", node->display_name, node->ls_acc_oxid); node->ls_acc_io->hio = NULL; ocs_els_io_free(node->ls_acc_io); node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE; node->ls_acc_io = NULL; } } /** * @ingroup node_common * @brief state: shutdown a node * * A node is shutdown, * * @param ctx remote node sm context * @param evt event to process * @param arg per event optional argument * * @return returns NULL * * @note */ void * __ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg) { int32_t rc; std_node_state_decl(); node_sm_trace(); switch(evt) { case OCS_EVT_ENTER: { ocs_node_hold_frames(node); ocs_assert(ocs_node_active_ios_empty(node), NULL); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL); /* by default, we will be freeing node after we unwind */ node->req_free = 1; switch (node->shutdown_reason) { case OCS_NODE_SHUTDOWN_IMPLICIT_LOGO: /* sm: if shutdown reason is implicit logout / ocs_node_attach * Node shutdown b/c of PLOGI received when node already * logged in. We have PLOGI service parameters, so submit * node attach; we won't be freeing this node */ /* currently, only case for implicit logo is PLOGI recvd. 
Thus, * node's ELS IO pending list won't be empty (PLOGI will be on it) */ ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL); node_printf(node, "Shutdown reason: implicit logout, re-authenticate\n"); ocs_scsi_io_alloc_enable(node); /* Re-attach node with the same HW node resources */ node->req_free = 0; rc = ocs_node_attach(node); ocs_node_transition(node, __ocs_d_wait_node_attach, NULL); if (rc == OCS_HW_RTN_SUCCESS_SYNC) { ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL); } break; case OCS_NODE_SHUTDOWN_EXPLICIT_LOGO: { int8_t pend_frames_empty; /* cleanup any pending LS_ACC ELSs */ ocs_node_send_ls_io_cleanup(node); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL); ocs_lock(&node->pend_frames_lock); pend_frames_empty = ocs_list_empty(&node->pend_frames); ocs_unlock(&node->pend_frames_lock); /* there are two scenarios where we want to keep this node alive: * 1. there are pending frames that need to be processed or * 2. we're an initiator and the remote node is a target and we * need to re-authenticate */ node_printf(node, "Shutdown: explicit logo pend=%d sport.ini=%d node.tgt=%d\n", !pend_frames_empty, node->sport->enable_ini, node->targ); if((!pend_frames_empty) || (node->sport->enable_ini && node->targ)) { uint8_t send_plogi = FALSE; if (node->sport->enable_ini && node->targ) { /* we're an initiator and node shutting down is a target; we'll * need to re-authenticate in initial state */ send_plogi = TRUE; } /* transition to __ocs_d_init (will retain HW node resources) */ ocs_scsi_io_alloc_enable(node); node->req_free = 0; /* either pending frames exist, or we're re-authenticating with PLOGI * (or both); in either case, return to initial state */ ocs_node_init_device(node, send_plogi); } /* else: let node shutdown occur */ break; } case OCS_NODE_SHUTDOWN_DEFAULT: default: /* shutdown due to link down, node going away (xport event) or * sport shutdown, purge pending and proceed to cleanup node */ /* cleanup any pending 
LS_ACC ELSs */ ocs_node_send_ls_io_cleanup(node); ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL); node_printf(node, "Shutdown reason: default, purge pending\n"); ocs_node_purge_pending(node); break; } break; } case OCS_EVT_EXIT: ocs_node_accept_frames(node); break; default: __ocs_node_common(__func__, ctx, evt, arg); return NULL; } return NULL; } /** * @ingroup common_node * @brief Checks to see if ELS's have been quiesced * * Check if ELS's have been quiesced. If so, transition to the * next state in the shutdown process. * * @param node Node for which ELS's are checked * * @return Returns 1 if ELS's have been quiesced, 0 otherwise. */ static int ocs_node_check_els_quiesced(ocs_node_t *node) { ocs_assert(node, -1); /* check to see if ELS requests, completions are quiesced */ if ((node->els_req_cnt == 0) && (node->els_cmpl_cnt == 0) && ocs_els_io_list_empty(node, &node->els_io_active_list)) { if (!node->attached) { /* hw node detach already completed, proceed */ node_printf(node, "HW node not attached\n"); ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL); } else { /* hw node detach hasn't completed, transition and wait */ node_printf(node, "HW node still attached\n"); ocs_node_transition(node, __ocs_node_wait_node_free, NULL); } return 1; } return 0; } /** * @ingroup common_node * @brief Initiate node IO cleanup. * * Note: this function must be called with a non-attached node * or a node for which the node detach (ocs_hw_node_detach()) * has already been initiated. * * @param node Node for which shutdown is initiated * * @return Returns None. 
*/
void
ocs_node_initiate_cleanup(ocs_node_t *node)
{
	ocs_io_t *els;
	ocs_io_t *els_next;
	ocs_t *ocs;
	ocs_assert(node);
	ocs = node->ocs;

	/* first cleanup ELS's that are pending (not yet active) */
	ocs_lock(&node->active_ios_lock);
	ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
		/* skip the ELS IO for which a response will be sent after shutdown */
		if ((node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) &&
		    (els == node->ls_acc_io)) {
			continue;
		}
		/* can't call ocs_els_io_free() because lock is held; cleanup manually */
		node_printf(node, "Freeing pending els %s\n", els->display_name);
		ocs_list_remove(&node->els_io_pend_list, els);
		ocs_io_free(node->ocs, els);
	}
	ocs_unlock(&node->active_ios_lock);

	if (node->ls_acc_io && node->ls_acc_io->hio != NULL) {
		/*
		 * if there's an IO that will result in an LS_ACC after
		 * shutdown and its HW IO is non-NULL, it better be an
		 * implicit logout in vanilla sequence coalescing. In this
		 * case, force the LS_ACC to go out on another XRI (hio)
		 * since the previous will have been aborted by the UNREG_RPI
		 */
		ocs_assert(node->shutdown_reason == OCS_NODE_SHUTDOWN_IMPLICIT_LOGO);
		ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI);
		node_printf(node, "invalidating ls_acc_io due to implicit logo\n");

		/* No need to abort because the unreg_rpi takes care of it, just free */
		ocs_hw_io_free(&ocs->hw, node->ls_acc_io->hio);

		/* NULL out hio to force the LS_ACC to grab a new XRI */
		node->ls_acc_io->hio = NULL;
	}

	/*
	 * if ELS's have already been quiesced, will move to next state
	 * if ELS's have not been quiesced, abort them
	 */
	if (ocs_node_check_els_quiesced(node) == 0) {
		/*
		 * Abort all ELS's since ELS's won't be aborted by HW
		 * node free.
		 */
		ocs_node_abort_all_els(node);
		ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL);
	}
}

/**
 * @ingroup node_common
 * @brief Node state machine: Wait for all ELSs to complete.
 *
 *

Description

* State waits for all ELSs to complete after aborting all
 * outstanding ELS requests.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	/* set when an event may have completed/cancelled the last ELS */
	uint8_t check_quiesce = FALSE;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER: {
		ocs_node_hold_frames(node);
		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
			node_printf(node, "All ELS IOs complete\n");
			check_quiesce = TRUE;
		}
		break;
	}
	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_ELS_REQ_ABORTED:
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		check_quiesce = TRUE;
		break;

	case OCS_EVT_SRRS_ELS_CMPL_OK:
	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		check_quiesce = TRUE;
		break;

	case OCS_EVT_ALL_CHILD_NODES_FREE:
		/* all ELS IO's complete */
		node_printf(node, "All ELS IOs complete\n");
		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
		check_quiesce = TRUE;
		break;

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		break;

	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;

	default:
		__ocs_node_common(__func__, ctx, evt, arg);
		return NULL;
	}

	/* transition to the next shutdown state if this event quiesced the ELSs */
	if (check_quiesce) {
		ocs_node_check_els_quiesced(node);
	}
	return NULL;
}

/**
 * @ingroup node_common
 * @brief Node state machine: Wait for a HW node free event to
 * complete.
 *
 *

Description

* State waits for the node free event to be received from the HW.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);
		break;

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_NODE_FREE_OK:
		/* node is officially no longer attached */
		node->attached = FALSE;
		ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
		break;

	case OCS_EVT_ALL_CHILD_NODES_FREE:
	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		/* As IOs and ELS IO's complete we expect to get these events */
		break;

	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* Fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
		break;

	default:
		__ocs_node_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup node_common
 * @brief state: initiate node shutdown
 *
 * State is entered when a node receives a shutdown event, and it's waiting
 * for all the active IOs and ELS IOs associated with the node to complete.
*
 * @param ctx remote node sm context
 * @param evt event to process
 * @param arg per event optional argument
 *
 * @return returns NULL
 */

void *
__ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_io_t *io;
	ocs_io_t *next;
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		ocs_node_hold_frames(node);

		/* first check to see if no ELS IOs are outstanding */
		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
			/* If there are any active IOS, Free them. */
			if (!ocs_node_active_ios_empty(node)) {
				ocs_lock(&node->active_ios_lock);
				ocs_list_foreach_safe(&node->active_ios, io, next) {
					ocs_list_remove(&io->node->active_ios, io);
					ocs_io_free(node->ocs, io);
				}
				ocs_unlock(&node->active_ios_lock);
			}
			ocs_node_transition(node, __ocs_node_shutdown, NULL);
		}
		break;

	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case OCS_EVT_ALL_CHILD_NODES_FREE: {
		/* only shut down once both normal IOs and ELS IOs have drained */
		if (ocs_node_active_ios_empty(node) &&
		    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
			ocs_node_transition(node, __ocs_node_shutdown, NULL);
		}
		break;
	}

	case OCS_EVT_EXIT:
		ocs_node_accept_frames(node);
		break;

	case OCS_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IO IO's complete */
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case OCS_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
		/* fall through */
	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
		ocs_log_debug(ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt));
		break;

	case OCS_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	default:
		__ocs_node_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}

/**
 * @ingroup node_common
 * @brief state: common node event handler
 *
 * Handle common/shared node events
 *
 * @param funcname calling function's name
 * @param ctx remote node sm context
 * @param evt event to process
* @param arg per event optional argument
 *
 * @return returns NULL
 */

void *
__ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	ocs_node_t *node = NULL;
	ocs_t *ocs = NULL;
	ocs_node_cb_t *cbdata = arg;
	ocs_assert(ctx, NULL);
	ocs_assert(ctx->app, NULL);
	node = ctx->app;
	ocs_assert(node->ocs, NULL);
	ocs = node->ocs;

	switch(evt) {
	case OCS_EVT_ENTER:
	case OCS_EVT_REENTER:
	case OCS_EVT_EXIT:
	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY:
	case OCS_EVT_NODE_MISSING:
	case OCS_EVT_FCP_CMD_RCVD:
		break;

	case OCS_EVT_NODE_REFOUND:
		node->refound = 1;
		break;

	/* node->attached must be set appropriately for all node attach/detach events */
	case OCS_EVT_NODE_ATTACH_OK:
		node->attached = TRUE;
		break;

	case OCS_EVT_NODE_FREE_OK:
	case OCS_EVT_NODE_ATTACH_FAIL:
		node->attached = FALSE;
		break;

	/* handle any ELS completions that other states either didn't care about
	 * or forgot about */
	case OCS_EVT_SRRS_ELS_CMPL_OK:
	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
		ocs_assert(node->els_cmpl_cnt, NULL);
		node->els_cmpl_cnt--;
		break;

	/* handle any ELS request completions that other states either didn't care about
	 * or forgot about */
	case OCS_EVT_SRRS_ELS_REQ_OK:
	case OCS_EVT_SRRS_ELS_REQ_FAIL:
	case OCS_EVT_SRRS_ELS_REQ_RJT:
	case OCS_EVT_ELS_REQ_ABORTED:
		ocs_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		break;

	case OCS_EVT_ELS_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;

		/* Unsupported ELS was received, send LS_RJT, command not supported */
		ocs_log_debug(ocs, "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
			      node->display_name, funcname,
			      ((uint8_t*)cbdata->payload->dma.virt)[0]);
		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
			FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL,
			0, NULL, NULL);
		break;
	}

	case OCS_EVT_PLOGI_RCVD:
	case OCS_EVT_FLOGI_RCVD:
	case OCS_EVT_LOGO_RCVD:
	case OCS_EVT_PRLI_RCVD:
	case OCS_EVT_PRLO_RCVD:
	case OCS_EVT_PDISC_RCVD:
	case OCS_EVT_FDISC_RCVD:
	case OCS_EVT_ADISC_RCVD:
	case OCS_EVT_RSCN_RCVD:
	case OCS_EVT_SCR_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;
		/* sm: / send ELS_RJT */
		ocs_log_debug(ocs, "[%s] (%s) %s sending ELS_RJT\n",
			      node->display_name, funcname, ocs_sm_event_name(evt));
		/* if we didn't catch this in a state, send generic LS_RJT */
		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
			FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NO_ADDITIONAL,
			0, NULL, NULL);
		break;
	}

	case OCS_EVT_GID_PT_RCVD:
	case OCS_EVT_RFT_ID_RCVD:
	case OCS_EVT_RFF_ID_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;
		/* unhandled nameserver request: reject with "command not supported" */
		ocs_log_debug(ocs, "[%s] (%s) %s sending CT_REJECT\n",
			      node->display_name, funcname, ocs_sm_event_name(evt));
		ocs_send_ct_rsp(cbdata->io, hdr->ox_id, cbdata->payload->dma.virt,
				FCCT_HDR_CMDRSP_REJECT, FCCT_COMMAND_NOT_SUPPORTED, 0);
		break;
	}

	case OCS_EVT_ABTS_RCVD: {
		fc_header_t *hdr = cbdata->header->dma.virt;
		ocs_log_debug(ocs, "[%s] (%s) %s sending BA_ACC\n",
			      node->display_name, funcname, ocs_sm_event_name(evt));

		/* sm: send BA_ACC */
		ocs_bls_send_acc_hdr(cbdata->io, hdr);
		break;
	}

	default:
		ocs_log_test(node->ocs, "[%s] %-20s %-20s not handled\n",
			     node->display_name, funcname, ocs_sm_event_name(evt));
		break;
	}

	return NULL;
}

/**
 * @ingroup node_common
 * @brief save node service parameters
 *
 * Service parameters are copied into the node structure
 *
 * @param node pointer to node structure
 * @param payload pointer to service parameters to save
 *
 * @return none
 */

void
ocs_node_save_sparms(ocs_node_t *node, void *payload)
{
	ocs_memcpy(node->service_params, payload, sizeof(node->service_params));
}

/**
 * @ingroup node_common
 * @brief Post event to node state machine context
 *
 * This is used by the node state machine code to post events to the nodes. Upon
 * completion of the event posting, if the nesting depth is zero and we're not holding
 * inbound frames, then the pending frames are processed.
*
 * @param node pointer to node
 * @param evt event to post
 * @param arg event posting argument
 *
 * @return none
 */

void
ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg)
{
	int free_node = FALSE;
	ocs_assert(node);

	ocs_node_lock(node);
	/* evtdepth tracks re-entrant event posting so the node is only freed
	 * (and pending frames only dispatched) at the outermost level */
	node->evtdepth ++;

	ocs_sm_post_event(&node->sm, evt, arg);

	/* If our event call depth is one and we're not holding frames
	 * then we can dispatch any pending frames.  We don't want to allow
	 * the ocs_process_node_pending() call to recurse.
	 */
	if (!node->hold_frames && (node->evtdepth == 1)) {
		ocs_process_node_pending(node);
	}
	node->evtdepth --;

	/* Free the node object if so requested, and we're at an event
	 * call depth of zero
	 */
	if ((node->evtdepth == 0) && node->req_free) {
		free_node = TRUE;
	}
	ocs_node_unlock(node);

	/* free outside the node lock */
	if (free_node) {
		ocs_node_free(node);
	}

	return;
}

/**
 * @ingroup node_common
 * @brief transition state of a node
 *
 * The node's state is transitioned to the requested state. Entry/Exit
 * events are posted as needed.
 *
 * @param node pointer to node
 * @param state state to transition to
 * @param data transition data
 *
 * @return none
 */

void
ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data)
{
	ocs_sm_ctx_t *ctx = &node->sm;

	ocs_node_lock(node);
	if (ctx->current_state == state) {
		/* re-entering the same state posts REENTER instead of EXIT/ENTER */
		ocs_node_post_event(node, OCS_EVT_REENTER, data);
	} else {
		ocs_node_post_event(node, OCS_EVT_EXIT, data);
		ctx->current_state = state;
		ocs_node_post_event(node, OCS_EVT_ENTER, data);
	}
	ocs_node_unlock(node);
}

/**
 * @ingroup node_common
 * @brief build EUI formatted WWN
 *
 * Build a WWN given the somewhat transport agnostic iScsi naming specification, for FC
 * use the eui.
format, an ascii string such as: "eui.10000000C9A19501" * * @param buffer buffer to place formatted name into * @param buffer_len length in bytes of the buffer * @param eui_name cpu endian 64 bit WWN value * * @return none */ void ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name) { ocs_memset(buffer, 0, buffer_len); ocs_snprintf(buffer, buffer_len, "eui.%016llx", (unsigned long long)eui_name); } /** * @ingroup node_common * @brief return nodes' WWPN as a uint64_t * * The WWPN is computed from service parameters and returned as a uint64_t * * @param node pointer to node structure * * @return WWPN * */ uint64_t ocs_node_get_wwpn(ocs_node_t *node) { fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params; return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo))); } /** * @ingroup node_common * @brief return nodes' WWNN as a uint64_t * * The WWNN is computed from service parameters and returned as a uint64_t * * @param node pointer to node structure * * @return WWNN * */ uint64_t ocs_node_get_wwnn(ocs_node_t *node) { fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params; return (((uint64_t)ocs_be32toh(sp->node_name_hi) << 32ll) | (ocs_be32toh(sp->node_name_lo))); } /** * @brief Generate node ddump data * * Generates the node ddumpdata * * @param textbuf pointer to text buffer * @param node pointer to node context * * @return Returns 0 on success, or a negative value on failure. 
*/
int
ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node)
{
	ocs_io_t *io;
	ocs_io_t *els;
	int retval = 0;

	ocs_ddump_section(textbuf, "node", node->instance_index);
	ocs_ddump_value(textbuf, "display_name", "%s", node->display_name);
	ocs_ddump_value(textbuf, "current_state", "%s", node->current_state_name);
	ocs_ddump_value(textbuf, "prev_state", "%s", node->prev_state_name);
	ocs_ddump_value(textbuf, "current_evt", "%s", ocs_sm_event_name(node->current_evt));
	ocs_ddump_value(textbuf, "prev_evt", "%s", ocs_sm_event_name(node->prev_evt));

	ocs_ddump_value(textbuf, "indicator", "%#x", node->rnode.indicator);
	ocs_ddump_value(textbuf, "fc_id", "%#06x", node->rnode.fc_id);
	ocs_ddump_value(textbuf, "attached", "%d", node->rnode.attached);

	ocs_ddump_value(textbuf, "hold_frames", "%d", node->hold_frames);
	ocs_ddump_value(textbuf, "io_alloc_enabled", "%d", node->io_alloc_enabled);
	ocs_ddump_value(textbuf, "shutdown_reason", "%d", node->shutdown_reason);
	ocs_ddump_value(textbuf, "send_ls_acc", "%d", node->send_ls_acc);
	ocs_ddump_value(textbuf, "ls_acc_did", "%d", node->ls_acc_did);
	ocs_ddump_value(textbuf, "ls_acc_oxid", "%#04x", node->ls_acc_oxid);
	ocs_ddump_value(textbuf, "req_free", "%d", node->req_free);
	ocs_ddump_value(textbuf, "els_req_cnt", "%d", node->els_req_cnt);
	ocs_ddump_value(textbuf, "els_cmpl_cnt", "%d", node->els_cmpl_cnt);

	ocs_ddump_value(textbuf, "targ", "%d", node->targ);
	ocs_ddump_value(textbuf, "init", "%d", node->init);
	ocs_ddump_value(textbuf, "wwnn", "%s", node->wwnn);
	ocs_ddump_value(textbuf, "wwpn", "%s", node->wwpn);
	/* login_state is 1 only when the device SM has reached device_ready */
	ocs_ddump_value(textbuf, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
	ocs_ddump_value(textbuf, "chained_io_count", "%d", node->chained_io_count);
	ocs_ddump_value(textbuf, "abort_cnt", "%d", node->abort_cnt);

	/* service_params+4 skips the leading command code word of the saved PLOGI payload */
	ocs_display_sparams(NULL, "node_sparams", 1, textbuf, node->service_params+4);

	ocs_lock(&node->pend_frames_lock);
	if (!ocs_list_empty(&node->pend_frames)) {
		ocs_hw_sequence_t *frame;
		ocs_ddump_section(textbuf, "pending_frames", 0);
		ocs_list_foreach(&node->pend_frames, frame) {
			fc_header_t *hdr;
			char buf[128];

			hdr = frame->header->dma.virt;
			ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu",
				     hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
				     frame->payload->dma.len);
			ocs_ddump_value(textbuf, "frame", "%s", buf);
		}
		ocs_ddump_endsection(textbuf, "pending_frames", 0);
	}
	ocs_unlock(&node->pend_frames_lock);

	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);

	/* active_ios_lock also protects the two ELS IO lists below */
	ocs_lock(&node->active_ios_lock);
	ocs_ddump_section(textbuf, "active_ios", 0);
	ocs_list_foreach(&node->active_ios, io) {
		ocs_ddump_io(textbuf, io);
	}
	ocs_ddump_endsection(textbuf, "active_ios", 0);

	ocs_ddump_section(textbuf, "els_io_pend_list", 0);
	ocs_list_foreach(&node->els_io_pend_list, els) {
		ocs_ddump_els(textbuf, els);
	}
	ocs_ddump_endsection(textbuf, "els_io_pend_list", 0);

	ocs_ddump_section(textbuf, "els_io_active_list", 0);
	ocs_list_foreach(&node->els_io_active_list, els) {
		ocs_ddump_els(textbuf, els);
	}
	ocs_ddump_endsection(textbuf, "els_io_active_list", 0);
	ocs_unlock(&node->active_ios_lock);

	ocs_ddump_endsection(textbuf, "node", node->instance_index);

	return retval;
}

/**
 * @brief check ELS request completion
 *
 * Check ELS request completion event to make sure it's for the
 * ELS request we expect. If not, invoke given common event
 * handler and return an error.
* * @param ctx state machine context * @param evt ELS request event * @param arg event argument * @param cmd ELS command expected * @param node_common_func common event handler to call if ELS * doesn't match * @param funcname function name that called this * * @return zero if ELS command matches, -1 otherwise */ int32_t node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname) { ocs_node_t *node = NULL; ocs_t *ocs = NULL; ocs_node_cb_t *cbdata = arg; fc_els_gen_t *els_gen = NULL; ocs_assert(ctx, -1); node = ctx->app; ocs_assert(node, -1); ocs = node->ocs; ocs_assert(ocs, -1); cbdata = arg; ocs_assert(cbdata, -1); ocs_assert(cbdata->els, -1); els_gen = (fc_els_gen_t *)cbdata->els->els_req.virt; ocs_assert(els_gen, -1); if ((cbdata->els->hio_type != OCS_HW_ELS_REQ) || (els_gen->command_code != cmd)) { if (cbdata->els->hio_type != OCS_HW_ELS_REQ) { ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received type=%d\n", node->display_name, funcname, cmd, cbdata->els->hio_type); } else { ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received cmd=x%x\n", node->display_name, funcname, cmd, els_gen->command_code); } /* send event to common handler */ node_common_func(funcname, ctx, evt, arg); return -1; } return 0; } /** * @brief check NS request completion * * Check ELS request completion event to make sure it's for the * nameserver request we expect. If not, invoke given common * event handler and return an error. 
*
 * @param ctx state machine context
 * @param evt ELS request event
 * @param arg event argument
 * @param cmd nameserver command expected
 * @param node_common_func common event handler to call if
 * nameserver cmd doesn't match
 * @param funcname function name that called this
 *
 * @return zero if NS command matches, -1 otherwise
 */

int32_t
node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
{
	ocs_node_t *node = NULL;
	ocs_t *ocs = NULL;
	ocs_node_cb_t *cbdata = arg;
	fcct_iu_header_t *fcct = NULL;

	ocs_assert(ctx, -1);
	node = ctx->app;
	ocs_assert(node, -1);
	ocs = node->ocs;
	ocs_assert(ocs, -1);
	/* NOTE(review): cbdata is already initialized from arg above; this
	 * re-assignment is redundant (same pattern as node_check_els_req) */
	cbdata = arg;
	ocs_assert(cbdata, -1);
	ocs_assert(cbdata->els, -1);
	fcct = (fcct_iu_header_t *)cbdata->els->els_req.virt;
	ocs_assert(fcct, -1);

	if ((cbdata->els->hio_type != OCS_HW_FC_CT) || fcct->cmd_rsp_code != ocs_htobe16(cmd)) {
		if (cbdata->els->hio_type != OCS_HW_FC_CT) {
			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received type=%d\n",
				      node->display_name, funcname, cmd, cbdata->els->hio_type);
		} else {
			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received cmd=x%x\n",
				      node->display_name, funcname, cmd, fcct->cmd_rsp_code);
		}
		/* send event to common handler */
		node_common_func(funcname, ctx, evt, arg);
		return -1;
	}
	return 0;
}

/**
 * @brief Emit the mgmt property/action list for a node (and its active IOs).
 */
void
ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;

	ocs_mgmt_start_section(textbuf, "node", node->instance_index);

	/* Readonly values */
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "hold_frames");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "shutting_down");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "req_free");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id_in_use");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "abort_cnt");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "targ");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "pend_frames");
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "chained_io_count");

	/* Actions */
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");

	ocs_lock(&node->active_ios_lock);
	ocs_list_foreach(&node->active_ios, io) {
		if ((io->mgmt_functions) && (io->mgmt_functions->get_list_handler)) {
			io->mgmt_functions->get_list_handler(textbuf, io);
		}
	}
	ocs_unlock(&node->active_ios_lock);

	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
}

/**
 * @brief Look up and emit a single named mgmt value for a node, delegating
 * unknown names to the node's active IOs.
 *
 * @return 0 if the value was emitted, -1 if the name was not recognized.
 */
int
ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	char qualifier[80];
	int retval = -1;

	ocs_mgmt_start_section(textbuf, "node", node->instance_index);

	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);

	/* If it doesn't start with my qualifier I don't know what to do with it */
	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
		char *unqualified_name = name + strlen(qualifier) +1;

		/* See if it's a value I can supply */
		if (ocs_strcmp(unqualified_name, "display_name") == 0) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "indicator") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "attached") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "hold_frames") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "io_alloc_enabled") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "req_free") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "ls_acc_oxid") == 0) {
			/* NOTE(review): "0x%#04x" combines a literal "0x" with the
			 * '#' alternative-form flag, yielding "0x0x..." output --
			 * confirm whether consumers depend on this */
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%#04x", node->ls_acc_oxid);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "ls_acc_did") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%#04x", node->ls_acc_did);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "abort_cnt") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "targ") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "init") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
			/* emitted via emit_int with a "%s" format; node->wwpn is the
			 * preformatted string form of the WWPN */
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "current_state") == 0) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "login_state") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "pend_frames") == 0) {
			ocs_hw_sequence_t *frame;
			ocs_lock(&node->pend_frames_lock);
			ocs_list_foreach(&node->pend_frames, frame) {
				fc_header_t *hdr;
				char buf[128];

				hdr = frame->header->dma.virt;
				ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
					     ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
					     frame->payload->dma.len);
				ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
			}
			ocs_unlock(&node->pend_frames_lock);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "chained_io_count") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
			retval = 0;
		} else {
			/* If I didn't know the value of this status pass the request to each of my children */
			ocs_lock(&node->active_ios_lock);
			ocs_list_foreach(&node->active_ios, io) {
				if ((io->mgmt_functions) && (io->mgmt_functions->get_handler)) {
					retval = io->mgmt_functions->get_handler(textbuf, qualifier, name, io);
				}
				if (retval == 0) {
					break;
				}
			}
			ocs_unlock(&node->active_ios_lock);
		}
	}

	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
	return retval;
}

/**
 * @brief Emit all mgmt values for a node, then each active IO's values.
 */
void
ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	ocs_hw_sequence_t *frame;

	ocs_mgmt_start_section(textbuf, "node", node->instance_index);

	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
	/* NOTE(review): "0x%#04x" yields a doubled "0x0x" prefix -- see
	 * ocs_mgmt_node_get */
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%#04x", node->ls_acc_oxid);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%#04x", node->ls_acc_did);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);

	ocs_lock(&node->pend_frames_lock);
	ocs_list_foreach(&node->pend_frames, frame) {
		fc_header_t *hdr;
		char buf[128];

		hdr = frame->header->dma.virt;
		ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
			     ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
			     frame->payload->dma.len);
		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
	}
	ocs_unlock(&node->pend_frames_lock);

	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);

	ocs_lock(&node->active_ios_lock);
	ocs_list_foreach(&node->active_ios, io) {
		if ((io->mgmt_functions) && (io->mgmt_functions->get_all_handler)) {
			io->mgmt_functions->get_all_handler(textbuf,io);
		}
	}
	ocs_unlock(&node->active_ios_lock);

	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
}

/**
 * @brief Set a mgmt value on a node; the node itself has no writable
 * values, so requests are only delegated to the active IOs.
 *
 * @return 0 if some IO handled the set, -1 otherwise.
 */
int
ocs_mgmt_node_set(char *parent, char *name, char *value, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	char qualifier[80];
	int retval = -1;

	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);

	/* If it doesn't start with my qualifier I don't know what to do with it */
	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
		ocs_lock(&node->active_ios_lock);
		ocs_list_foreach(&node->active_ios, io) {
			if ((io->mgmt_functions) && (io->mgmt_functions->set_handler)) {
				retval = io->mgmt_functions->set_handler(qualifier, name, value, io);
			}
			if (retval == 0) {
				break;
			}
		}
		ocs_unlock(&node->active_ios_lock);
	}

	return retval;
}

/**
 * @brief Execute a mgmt action ("resume") on a node, delegating unknown
 * actions to the active IOs.
 *
 * @return 0 if an IO handled the action, -1 otherwise (including a pure
 * "resume", which posts the event but does not set retval).
 */
int
ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
		   void *arg_out, uint32_t arg_out_length, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	char qualifier[80];
	int retval = -1;

	/* NOTE(review): this qualifier uses "%s.node%d" while get/set use
	 * "%s/node[%d]" -- confirm the difference is intentional */
	ocs_snprintf(qualifier, sizeof(qualifier), "%s.node%d", parent, node->instance_index);

	/* If it doesn't start with my qualifier I don't know what to do with it */
	if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
		char *unqualified_name = action + strlen(qualifier) +1;

		if (ocs_strcmp(unqualified_name, "resume") == 0) {
			ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
		}

		/* NOTE(review): there is no "else" before this block, so the
		 * IO delegation below also runs after a "resume" action --
		 * verify this matches the intended behavior */
		{
			/* If I didn't know how to do this action pass the request to each of my children */
			ocs_lock(&node->active_ios_lock);
			ocs_list_foreach(&node->active_ios, io) {
				if ((io->mgmt_functions) && (io->mgmt_functions->exec_handler)) {
					retval = io->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length, arg_out, arg_out_length, io);
				}
				if (retval == 0) {
					break;
				}
			}
			ocs_unlock(&node->active_ios_lock);
		}
	}

	return retval;
}

/**
 * @brief Return TRUE if active ios list is empty
 *
 * Test if node->active_ios list is empty while holding the node->active_ios_lock.
 *
 * @param node pointer to node object
 *
 * @return TRUE if node active ios list is empty
 */

int
ocs_node_active_ios_empty(ocs_node_t *node)
{
	int empty;

	ocs_lock(&node->active_ios_lock);
	empty = ocs_list_empty(&node->active_ios);
	ocs_unlock(&node->active_ios_lock);
	return empty;
}

/**
 * @brief Pause a node
 *
 * The node is placed in the __ocs_node_paused state after saving the state
 * to return to
 *
 * @param node Pointer to node object
 * @param state State to resume to
 *
 * @return none
 */

void
ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state)
{
	node->nodedb_state = state;
	ocs_node_transition(node, __ocs_node_paused, NULL);
}

/**
 * @brief Paused node state
 *
 * This state is entered when a state is "paused". When resumed, the node
 * is transitioned to a previously saved state (node->nodedb_state)
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return returns NULL
 */

void *
__ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		node_printf(node, "Paused\n");
		break;

	case OCS_EVT_RESUME: {
		/* consume the saved state before transitioning to it */
		ocs_sm_function_t pf = node->nodedb_state;

		node->nodedb_state = NULL;
		ocs_node_transition(node, pf, NULL);
		break;
	}

	case OCS_EVT_DOMAIN_ATTACH_OK:
		break;

	case OCS_EVT_SHUTDOWN:
		node->req_free = 1;
		break;

	default:
		__ocs_node_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}

/**
 * @brief Resume a paused state
 *
 * Posts a resume event to the paused node.
 *
 * @param node Pointer to node object
 *
 * @return returns 0 for success, a negative error code value for failure.
*/ int32_t ocs_node_resume(ocs_node_t *node) { ocs_assert(node != NULL, -1); ocs_node_post_event(node, OCS_EVT_RESUME, NULL); return 0; } /** * @ingroup node_common * @brief Dispatch a ELS frame. * *

Description

* An ELS frame is dispatched to the \c node state machine. * RQ Pair mode: this function is always called with a NULL hw * io. * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ int32_t ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq) { struct { uint32_t cmd; ocs_sm_event_t evt; uint32_t payload_size; } els_cmd_list[] = { {FC_ELS_CMD_PLOGI, OCS_EVT_PLOGI_RCVD, sizeof(fc_plogi_payload_t)}, {FC_ELS_CMD_FLOGI, OCS_EVT_FLOGI_RCVD, sizeof(fc_plogi_payload_t)}, {FC_ELS_CMD_LOGO, OCS_EVT_LOGO_RCVD, sizeof(fc_acc_payload_t)}, {FC_ELS_CMD_RRQ, OCS_EVT_RRQ_RCVD, sizeof(fc_acc_payload_t)}, {FC_ELS_CMD_PRLI, OCS_EVT_PRLI_RCVD, sizeof(fc_prli_payload_t)}, {FC_ELS_CMD_PRLO, OCS_EVT_PRLO_RCVD, sizeof(fc_prlo_payload_t)}, {FC_ELS_CMD_PDISC, OCS_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, {FC_ELS_CMD_FDISC, OCS_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD}, {FC_ELS_CMD_ADISC, OCS_EVT_ADISC_RCVD, sizeof(fc_adisc_payload_t)}, {FC_ELS_CMD_RSCN, OCS_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD}, {FC_ELS_CMD_SCR , OCS_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD}, }; ocs_t *ocs = node->ocs; ocs_node_cb_t cbdata; fc_header_t *hdr = seq->header->dma.virt; uint8_t *buf = seq->payload->dma.virt; ocs_sm_event_t evt = OCS_EVT_ELS_RCVD; uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD; uint32_t i; ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; /* find a matching event for the ELS command */ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i ++) { if (els_cmd_list[i].cmd == buf[0]) { evt = els_cmd_list[i].evt; payload_size = els_cmd_list[i].payload_size; break; } } switch(evt) { case OCS_EVT_FLOGI_RCVD: ocs_display_sparams(node->display_name, "flogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4); break; case OCS_EVT_FDISC_RCVD: 
ocs_display_sparams(node->display_name, "fdisc rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4); break; case OCS_EVT_PLOGI_RCVD: ocs_display_sparams(node->display_name, "plogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4); break; default: break; } cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER); if (cbdata.io != NULL) { cbdata.io->hw_priv = seq->hw_priv; /* if we're here, sequence initiative has been transferred */ cbdata.io->seq_init = 1; ocs_node_post_event(node, evt, &cbdata); } else { node_printf(node, "failure to allocate SCSI IO for ELS s_id %06x d_id %06x ox_id %04x rx_id %04x\n", fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id)); } ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Dispatch a ABTS frame (RQ Pair/sequence coalescing). * *

Description

* An ABTS frame is dispatched to the node state machine. This * function is used for both RQ Pair and sequence coalescing. * * @param node Node that originated the frame. * @param seq Header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ int32_t ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; ocs_xport_t *xport = ocs->xport; fc_header_t *hdr = seq->header->dma.virt; uint16_t ox_id = ocs_be16toh(hdr->ox_id); uint16_t rx_id = ocs_be16toh(hdr->rx_id); ocs_node_cb_t cbdata; int32_t rc = 0; node->abort_cnt++; /* * Check to see if the IO we want to abort is active, if it not active, * then we can send the BA_ACC using the send frame option */ if (ocs_io_find_tgt_io(ocs, node, ox_id, rx_id) == NULL) { uint32_t send_frame_capable; ocs_log_debug(ocs, "IO not found (ox_id %04x)\n", ox_id); /* If we have SEND_FRAME capability, then use it to send BA_ACC */ rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable); if ((rc == 0) && send_frame_capable) { rc = ocs_sframe_send_bls_acc(node, seq); if (rc) { ocs_log_test(ocs, "ocs_bls_acc_send_frame failed\n"); } return rc; } /* continuing */ } ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; cbdata.io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER); if (cbdata.io != NULL) { cbdata.io->hw_priv = seq->hw_priv; /* If we got this far, SIT=1 */ cbdata.io->seq_init = 1; /* fill out generic fields */ cbdata.io->ocs = ocs; cbdata.io->node = node; cbdata.io->cmd_tgt = TRUE; ocs_node_post_event(node, OCS_EVT_ABTS_RCVD, &cbdata); } else { ocs_atomic_add_return(&xport->io_alloc_failed_count, 1); node_printf(node, "SCSI IO allocation failed for ABTS received s_id %06x d_id %06x ox_id %04x rx_id %04x\n", fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id)); } /* 
ABTS processed, return RX buffer to the chip */ ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Dispatch a CT frame. * *

Description

* A CT frame is dispatched to the \c node state machine. * RQ Pair mode: this function is always called with a NULL hw * io. * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled and RX buffers need * to be returned. */ int32_t ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_t *ocs = node->ocs; fc_header_t *hdr = seq->header->dma.virt; fcct_iu_header_t *iu = seq->payload->dma.virt; ocs_sm_event_t evt = OCS_EVT_ELS_RCVD; uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD; uint16_t gscmd = ocs_be16toh(iu->cmd_rsp_code); ocs_node_cb_t cbdata; uint32_t i; struct { uint32_t cmd; ocs_sm_event_t evt; uint32_t payload_size; } ct_cmd_list[] = { {FC_GS_NAMESERVER_RFF_ID, OCS_EVT_RFF_ID_RCVD, 100}, {FC_GS_NAMESERVER_RFT_ID, OCS_EVT_RFT_ID_RCVD, 100}, {FC_GS_NAMESERVER_GNN_ID, OCS_EVT_GNN_ID_RCVD, 100}, {FC_GS_NAMESERVER_GPN_ID, OCS_EVT_GPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_GFPN_ID, OCS_EVT_GFPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_GFF_ID, OCS_EVT_GFF_ID_RCVD, 100}, {FC_GS_NAMESERVER_GID_FT, OCS_EVT_GID_FT_RCVD, 256}, {FC_GS_NAMESERVER_GID_PT, OCS_EVT_GID_PT_RCVD, 256}, {FC_GS_NAMESERVER_RPN_ID, OCS_EVT_RPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_RNN_ID, OCS_EVT_RNN_ID_RCVD, 100}, {FC_GS_NAMESERVER_RCS_ID, OCS_EVT_RCS_ID_RCVD, 100}, {FC_GS_NAMESERVER_RSNN_NN, OCS_EVT_RSNN_NN_RCVD, 100}, {FC_GS_NAMESERVER_RSPN_ID, OCS_EVT_RSPN_ID_RCVD, 100}, {FC_GS_NAMESERVER_RHBA, OCS_EVT_RHBA_RCVD, 100}, {FC_GS_NAMESERVER_RPA, OCS_EVT_RPA_RCVD, 100}, }; ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; /* find a matching event for the ELS/GS command */ for (i = 0; i < ARRAY_SIZE(ct_cmd_list); i ++) { if (ct_cmd_list[i].cmd == gscmd) { evt = ct_cmd_list[i].evt; payload_size = ct_cmd_list[i].payload_size; break; } } /* Allocate an IO and send a reject */ cbdata.io = ocs_els_io_alloc(node, 
payload_size, OCS_ELS_ROLE_RESPONDER); if (cbdata.io == NULL) { node_printf(node, "GS IO failed for s_id %06x d_id %06x ox_id %04x rx_id %04x\n", fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id)); return -1; } cbdata.io->hw_priv = seq->hw_priv; ocs_node_post_event(node, evt, &cbdata); ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Dispatch a FCP command frame when the node is not ready. * *

Description

* A frame is dispatched to the \c node state machine. * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 if frame processed and RX buffers cleaned * up appropriately, -1 if frame not handled. */ int32_t ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq) { ocs_node_cb_t cbdata; ocs_t *ocs = node->ocs; ocs_memset(&cbdata, 0, sizeof(cbdata)); cbdata.header = seq->header; cbdata.payload = seq->payload; ocs_node_post_event(node, OCS_EVT_FCP_CMD_RCVD, &cbdata); ocs_hw_sequence_free(&ocs->hw, seq); return 0; } /** * @ingroup node_common * @brief Stub handler for non-ABTS BLS frames * *

Description

* Log message and drop. Customer can plumb it to their back-end as needed * * @param node Node that originated the frame. * @param seq header/payload sequence buffers * * @return Returns 0 */ int32_t ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq) { fc_header_t *hdr = seq->header->dma.virt; node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n", ocs_htobe32(((uint32_t *)hdr)[0]), ocs_htobe32(((uint32_t *)hdr)[1]), ocs_htobe32(((uint32_t *)hdr)[2]), ocs_htobe32(((uint32_t *)hdr)[3]), ocs_htobe32(((uint32_t *)hdr)[4]), ocs_htobe32(((uint32_t *)hdr)[5])); return -1; } Index: head/sys/dev/pms/RefTisa/discovery/dm/dmdisc.c =================================================================== --- head/sys/dev/pms/RefTisa/discovery/dm/dmdisc.c (revision 359440) +++ head/sys/dev/pms/RefTisa/discovery/dm/dmdisc.c (revision 359441) @@ -1,7466 +1,7466 @@ /******************************************************************************* ** *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED *WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ** ********************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #ifdef FDS_DM #include #include #include #include #include #include /*****************************************************************************/ /*! \brief dmDiscover * * * Purpose: A discovery is started by this function * * \param dmRoot: DM context handle. * \param dmPortContext: Pointer to this instance of port context * \param option: Discovery option * * \return: * DM_RC_SUCCESS * DM_RC_FAILURE * */ /*****************************************************************************/ osGLOBAL bit32 dmDiscover( dmRoot_t *dmRoot, dmPortContext_t *dmPortContext, bit32 option) { dmIntPortContext_t *onePortContext = agNULL; bit32 ret = DM_RC_FAILURE; DM_DBG3(("dmDiscover: start\n")); onePortContext = (dmIntPortContext_t *)dmPortContext->dmData; if (onePortContext == agNULL) { DM_DBG1(("dmDiscover: onePortContext is NULL!!!\n")); return DM_RC_FAILURE; } if (onePortContext->valid == agFALSE) { DM_DBG1(("dmDiscover: invalid port!!!\n")); return DM_RC_FAILURE; } if (onePortContext->RegFailed == agTRUE) { DM_DBG1(("dmDiscover: Registration failed!!!\n")); return DM_RC_FAILURE; } switch ( option ) { case DM_DISCOVERY_OPTION_FULL_START: DM_DBG3(("dmDiscover: full, pid %d\n", onePortContext->id)); onePortContext->discovery.type = DM_DISCOVERY_OPTION_FULL_START; dmDiscoveryResetMCN(dmRoot, 
onePortContext); ret = dmFullDiscover(dmRoot, onePortContext); break; case DM_DISCOVERY_OPTION_INCREMENTAL_START: DM_DBG3(("dmDiscover: incremental, pid %d\n", onePortContext->id)); onePortContext->discovery.type = DM_DISCOVERY_OPTION_INCREMENTAL_START; dmDiscoveryResetMCN(dmRoot, onePortContext); ret = dmIncrementalDiscover(dmRoot, onePortContext, agFALSE); break; case DM_DISCOVERY_OPTION_ABORT: DM_DBG3(("dmDiscover: abort\n")); if (onePortContext->DiscoveryState != DM_DSTATE_COMPLETED) { if (onePortContext->discovery.pendingSMP == 0) { dmDiscoverAbort(dmRoot, onePortContext); tddmDiscoverCB( dmRoot, onePortContext->dmPortContext, dmDiscAborted ); } else { DM_DBG3(("dmDiscover: abortInProgress\n")); onePortContext->DiscoveryAbortInProgress = agTRUE; tddmDiscoverCB( dmRoot, dmPortContext, dmDiscAbortInProgress ); } } else { DM_DBG3(("dmDiscover: no discovery to abort\n")); tddmDiscoverCB( dmRoot, dmPortContext, dmDiscAbortInvalid ); } ret = DM_RC_SUCCESS; break; default: break; } return ret; } osGLOBAL bit32 dmFullDiscover( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext ) { dmExpander_t *oneExpander = agNULL; dmSASSubID_t dmSASSubID; dmDeviceData_t *oneExpDeviceData = agNULL; DM_DBG1(("dmFullDiscover: start\n")); if (onePortContext->valid == agFALSE) { DM_DBG1(("dmFullDiscover: invalid port!!!\n")); return DM_RC_FAILURE; } if (onePortContext->DiscoveryState == DM_DSTATE_STARTED) { DM_DBG1(("dmFullDiscover: no two instances of discovery allowed!!!\n")); return DM_RC_FAILURE; } onePortContext->DiscoveryState = DM_DSTATE_STARTED; dmSASSubID.sasAddressHi = onePortContext->sasRemoteAddressHi; dmSASSubID.sasAddressLo = onePortContext->sasRemoteAddressLo; /* check OnePortContext->discovery.discoveringExpanderList */ oneExpander = dmExpFind(dmRoot, onePortContext, dmSASSubID.sasAddressHi, dmSASSubID.sasAddressLo); if (oneExpander != agNULL) { oneExpDeviceData = oneExpander->dmDevice; } else { /* check dmAllShared->mainExpanderList */ oneExpander = 
dmExpMainListFind(dmRoot, onePortContext, dmSASSubID.sasAddressHi, dmSASSubID.sasAddressLo); if (oneExpander != agNULL) { oneExpDeviceData = oneExpander->dmDevice; } } if (oneExpDeviceData != agNULL) { dmSASSubID.initiator_ssp_stp_smp = oneExpDeviceData->initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = oneExpDeviceData->target_ssp_stp_smp; oneExpDeviceData->registered = agTRUE; dmAddSASToSharedcontext(dmRoot, onePortContext, &dmSASSubID, oneExpDeviceData, 0xFF); } else { DM_DBG1(("dmFullDiscover:oneExpDeviceData is NULL!!!\n")); return DM_RC_FAILURE; } dmUpStreamDiscoverStart(dmRoot, onePortContext); return DM_RC_SUCCESS; } osGLOBAL bit32 dmIncrementalDiscover( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, bit32 flag ) { dmExpander_t *oneExpander = agNULL; dmSASSubID_t dmSASSubID; dmDeviceData_t *oneExpDeviceData = agNULL; DM_DBG1(("dmIncrementalDiscover: start\n")); if (onePortContext->valid == agFALSE) { DM_DBG1(("dmIncrementalDiscover: invalid port!!!\n")); return DM_RC_FAILURE; } /* TDM triggerred; let go DM triggerred */ if (flag == agFALSE) { if (onePortContext->DiscoveryState == DM_DSTATE_STARTED) { DM_DBG1(("dmIncrementalDiscover: no two instances of discovery allowed!!!\n")); return DM_RC_FAILURE; } } onePortContext->DiscoveryState = DM_DSTATE_STARTED; onePortContext->discovery.type = DM_DISCOVERY_OPTION_INCREMENTAL_START; dmSASSubID.sasAddressHi = onePortContext->sasRemoteAddressHi; dmSASSubID.sasAddressLo = onePortContext->sasRemoteAddressLo; /* check OnePortContext->discovery.discoveringExpanderList */ oneExpander = dmExpFind(dmRoot, onePortContext, dmSASSubID.sasAddressHi, dmSASSubID.sasAddressLo); if (oneExpander != agNULL) { oneExpDeviceData = oneExpander->dmDevice; } else { /* check dmAllShared->mainExpanderList */ oneExpander = dmExpMainListFind(dmRoot, onePortContext, dmSASSubID.sasAddressHi, dmSASSubID.sasAddressLo); if (oneExpander != agNULL) { oneExpDeviceData = oneExpander->dmDevice; } } if (oneExpDeviceData != agNULL) { 
dmSASSubID.initiator_ssp_stp_smp = oneExpDeviceData->initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = oneExpDeviceData->target_ssp_stp_smp; oneExpDeviceData->registered = agTRUE; dmAddSASToSharedcontext(dmRoot, onePortContext, &dmSASSubID, oneExpDeviceData, 0xFF); } else { DM_DBG1(("dmIncrementalDiscover:oneExpDeviceData is NULL!!!\n")); return DM_RC_FAILURE; } dmUpStreamDiscoverStart(dmRoot, onePortContext); return DM_RC_SUCCESS; } osGLOBAL void dmUpStreamDiscoverStart( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext ) { // dmExpander_t *oneExpander = agNULL; bit32 sasAddressHi, sasAddressLo; dmDeviceData_t *oneDeviceData; dmExpander_t *oneExpander = agNULL; DM_DBG3(("dmUpStreamDiscoverStart: start\n")); if (onePortContext->valid == agFALSE) { DM_DBG1(("dmUpStreamDiscoverStart: invalid port!!!\n")); return; } /* at this point, the 1st expander should have been registered. find an expander from onePortContext */ sasAddressHi = onePortContext->sasRemoteAddressHi; sasAddressLo = onePortContext->sasRemoteAddressLo; DM_DBG3(("dmUpStreamDiscoverStart: Port Remote AddrHi 0x%08x Remote AddrLo 0x%08x\n", sasAddressHi, sasAddressLo)); oneDeviceData = dmDeviceFind(dmRoot, onePortContext, sasAddressHi, sasAddressLo); // oneDeviceData = oneExpander->dmDevice; // start here onePortContext->discovery.status = DISCOVERY_UP_STREAM; if (oneDeviceData == agNULL) { DM_DBG1(("dmUpStreamDiscoverStart: oneExpander is NULL, wrong!!!\n")); return; } else { if ( (oneDeviceData->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE) || (oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE) || DEVICE_IS_SMP_TARGET(oneDeviceData) ) { #if 1 /* for incremental discovery */ /* start here: if not on discoveringExpanderList, alloc and add dmNewEXPorNot() */ oneExpander = dmExpFind(dmRoot, onePortContext, sasAddressHi, sasAddressLo); if ( oneExpander == agNULL) { /* alloc and add */ oneExpander = dmDiscoveringExpanderAlloc(dmRoot, onePortContext, oneDeviceData); if ( oneExpander != 
agNULL) { dmDiscoveringExpanderAdd(dmRoot, onePortContext, oneExpander); } else { DM_DBG1(("dmUpStreamDiscoverStart: failed to allocate expander or discovey aborted!!!\n")); return; } } #endif dmUpStreamDiscovering(dmRoot, onePortContext, oneDeviceData); } else { DM_DBG1(("dmUpStreamDiscoverStart: oneDeviceData is not an Expander did %d, wrong!!!\n", oneDeviceData->id)); return; } } return; } /* sends report general */ osGLOBAL void dmUpStreamDiscovering( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *oneDeviceData ) { dmList_t *ExpanderList; dmExpander_t *oneNextExpander = agNULL; DM_DBG3(("dmUpStreamDiscovering: start\n")); if (onePortContext->valid == agFALSE) { DM_DBG1(("dmUpStreamDiscovering: invalid port!!!\n")); return; } tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); if (DMLIST_EMPTY(&(onePortContext->discovery.discoveringExpanderList))) { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); DM_DBG3(("dmUpStreamDiscovering: should be the end\n")); oneNextExpander = agNULL; } else { DMLIST_DEQUEUE_FROM_HEAD(&ExpanderList, &(onePortContext->discovery.discoveringExpanderList)); oneNextExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if ( oneNextExpander != agNULL) { DMLIST_ENQUEUE_AT_HEAD(&(oneNextExpander->linkNode), &(onePortContext->discovery.discoveringExpanderList)); DM_DBG3(("dmUpStreamDiscovering tdsaSASUpStreamDiscovering: dequeue head\n")); DM_DBG3(("dmUpStreamDiscovering: expander id %d\n", oneNextExpander->id)); } else { DM_DBG1(("dmUpStreamDiscovering: oneNextExpander is NULL!!!\n")); } tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } if (oneNextExpander != agNULL) { dmReportGeneralSend(dmRoot, oneNextExpander->dmDevice); } else { DM_DBG3(("dmUpStreamDiscovering: No more expander list\n")); dmDownStreamDiscoverStart(dmRoot, onePortContext, oneDeviceData); } return; } osGLOBAL void dmDownStreamDiscoverStart( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *oneDeviceData ) { 
dmExpander_t *UpStreamExpander; dmExpander_t *oneExpander; DM_DBG3(("dmDownStreamDiscoverStart: start\n")); if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE) { DM_DBG1(("dmDownStreamDiscoverStart: invalid port or aborted discovery!!!\n")); return; } /* set discovery status */ onePortContext->discovery.status = DISCOVERY_DOWN_STREAM; /* If it's an expander */ if ( (oneDeviceData->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE) || (oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE) || DEVICE_IS_SMP_TARGET(oneDeviceData) ) { oneExpander = oneDeviceData->dmExpander; UpStreamExpander = oneExpander->dmUpStreamExpander; /* If the two expanders are the root of two edge sets; sub-to-sub */ if ( (UpStreamExpander != agNULL) && ( UpStreamExpander->dmUpStreamExpander == oneExpander ) ) { DM_DBG3(("dmDownStreamDiscoverStart: Root found pExpander=%p pUpStreamExpander=%p\n", oneExpander, UpStreamExpander)); //Saves the root expander onePortContext->discovery.RootExp = oneExpander; DM_DBG3(("dmDownStreamDiscoverStart: Root exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmDownStreamDiscoverStart: Root exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); /* reset up stream inform for pExpander */ oneExpander->dmUpStreamExpander = agNULL; /* Add the pExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, oneExpander); /* reset up stream inform for oneExpander */ UpStreamExpander->dmUpStreamExpander = agNULL; /* Add the UpStreamExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, UpStreamExpander); } /* If the two expanders are not the root of two edge sets. 
eg) one root */ else { //Saves the root expander onePortContext->discovery.RootExp = oneExpander; DM_DBG3(("dmDownStreamDiscoverStart: NO Root pExpander=%p\n", oneExpander)); DM_DBG3(("dmDownStreamDiscoverStart: Root exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmDownStreamDiscoverStart: Root exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); /* (2.2.2.1) Add the pExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, oneExpander); } } /* Continue down stream discovering */ dmDownStreamDiscovering(dmRoot, onePortContext, oneDeviceData); return; } osGLOBAL void dmDownStreamDiscovering( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *oneDeviceData ) { dmExpander_t *NextExpander = agNULL; dmList_t *ExpanderList; DM_DBG3(("dmDownStreamDiscovering: start\n")); if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE) { DM_DBG1(("dmDownStreamDiscovering: invalid port or aborted discovery!!!\n")); return; } tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); if (DMLIST_EMPTY(&(onePortContext->discovery.discoveringExpanderList))) { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); DM_DBG3(("dmDownStreamDiscovering: should be the end\n")); NextExpander = agNULL; } else { - DMLIST_DEQUEUE_FROM_HEAD(&ExpanderList, &(onePortContext->discovery.discoveringExpanderList));; + DMLIST_DEQUEUE_FROM_HEAD(&ExpanderList, &(onePortContext->discovery.discoveringExpanderList)); NextExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if ( NextExpander != agNULL) { - DMLIST_ENQUEUE_AT_HEAD(&(NextExpander->linkNode), &(onePortContext->discovery.discoveringExpanderList));; + DMLIST_ENQUEUE_AT_HEAD(&(NextExpander->linkNode), &(onePortContext->discovery.discoveringExpanderList)); DM_DBG3(("dmDownStreamDiscovering tdsaSASDownStreamDiscovering: dequeue head\n")); DM_DBG3(("dmDownStreamDiscovering: expander id %d\n", NextExpander->id)); } else { 
DM_DBG1(("dmDownStreamDiscovering: NextExpander is NULL!!!\n")); } tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } /* If there is an expander for continue discoving */ if ( NextExpander != agNULL) { DM_DBG3(("dmDownStreamDiscovering: Found pNextExpander=%p discoveryStatus=0x%x\n", NextExpander, onePortContext->discovery.status)); switch (onePortContext->discovery.status) { /* If the discovery status is DISCOVERY_DOWN_STREAM */ case DISCOVERY_DOWN_STREAM: /* Send report general for the next expander */ DM_DBG3(("dmDownStreamDiscovering: DownStream pNextExpander=%p\n", NextExpander)); DM_DBG3(("dmDownStreamDiscovering: oneDeviceData %p did %d\n", oneDeviceData, oneDeviceData->id)); DM_DBG3(("dmDownStreamDiscovering: oneExpander %p did %d\n", oneDeviceData->dmExpander, oneDeviceData->dmExpander->id)); DM_DBG3(("dmDownStreamDiscovering: 2nd oneDeviceData %p did %d\n", NextExpander->dmDevice, NextExpander->dmDevice->id)); DM_DBG3(("dmDownStreamDiscovering: 2nd oneExpander %p did %d\n", NextExpander, NextExpander->id)); DM_DBG3(("dmDownStreamDiscovering: 2nd used oneExpander %p did %d\n", NextExpander->dmDevice->dmExpander, NextExpander->dmDevice->dmExpander->id)); if (NextExpander != NextExpander->dmDevice->dmExpander) { DM_DBG3(("dmDownStreamDiscovering: wrong!!!\n")); } dmReportGeneralSend(dmRoot, NextExpander->dmDevice); break; /* If the discovery status is DISCOVERY_CONFIG_ROUTING */ case DISCOVERY_CONFIG_ROUTING: case DISCOVERY_REPORT_PHY_SATA: /* set discovery status */ onePortContext->discovery.status = DISCOVERY_DOWN_STREAM; DM_DBG3(("dmDownStreamDiscovering: pPort->discovery.status=DISCOVERY_CONFIG_ROUTING, make it DOWN_STREAM\n")); /* If not the last phy */ if ( NextExpander->discoveringPhyId < NextExpander->dmDevice->numOfPhys ) { DM_DBG3(("dmDownStreamDiscovering: pNextExpander->discoveringPhyId=0x%x pNextExpander->numOfPhys=0x%x. 
Send More Discover\n", NextExpander->discoveringPhyId, NextExpander->dmDevice->numOfPhys)); /* Send discover for the next expander */ dmDiscoverSend(dmRoot, NextExpander->dmDevice); } /* If it's the last phy */ else { DM_DBG3(("dmDownStreamDiscovering: Last Phy, remove expander%p start DownStream=%p\n", NextExpander, NextExpander->dmDevice)); dmDiscoveringExpanderRemove(dmRoot, onePortContext, NextExpander); dmDownStreamDiscovering(dmRoot, onePortContext, NextExpander->dmDevice); } break; default: DM_DBG3(("dmDownStreamDiscovering: *** Unknown pPort->discovery.status=0x%x\n", onePortContext->discovery.status)); } } /* If no expander for continue discoving */ else { DM_DBG3(("dmDownStreamDiscovering: No more expander DONE\n")); /* discover done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_SUCCESS); } return; } osGLOBAL void dmUpStreamDiscoverExpanderPhy( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander, smpRespDiscover_t *pDiscoverResp ) { agsaSASIdentify_t sasIdentify; dmSASSubID_t dmSASSubID; bit32 attachedSasHi, attachedSasLo; dmExpander_t *AttachedExpander = agNULL; bit8 connectionRate; dmDeviceData_t *oneDeviceData = agNULL; dmDeviceData_t *AttachedDevice = agNULL; dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; DM_DBG3(("dmUpStreamDiscoverExpanderPhy: start\n")); if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE) { DM_DBG1(("dmUpStreamDiscoverExpanderPhy: invalid port or aborted discovery!!!\n")); return; } if (oneExpander != oneExpander->dmDevice->dmExpander) { DM_DBG1(("dmUpStreamDiscoverExpanderPhy: wrong!!!\n")); } dm_memset(&sasIdentify, 0, sizeof(agsaSASIdentify_t)); oneDeviceData = oneExpander->dmDevice; DM_DBG3(("dmUpStreamDiscoverExpanderPhy: Phy #%d of SAS %08x-%08x\n", oneExpander->discoveringPhyId, oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG3((" Attached device: %s\n", ( 
DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 0 ? "No Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 1 ? "End Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 2 ? "Edge Expander" : "Fanout Expander"))))); if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { DM_DBG3((" SAS address : %08x-%08x\n", DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp), DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp))); DM_DBG3((" SSP Target : %d\n", DISCRSP_IS_SSP_TARGET(pDiscoverResp)?1:0)); DM_DBG3((" STP Target : %d\n", DISCRSP_IS_STP_TARGET(pDiscoverResp)?1:0)); DM_DBG3((" SMP Target : %d\n", DISCRSP_IS_SMP_TARGET(pDiscoverResp)?1:0)); DM_DBG3((" SATA DEVICE : %d\n", DISCRSP_IS_SATA_DEVICE(pDiscoverResp)?1:0)); DM_DBG3((" SSP Initiator : %d\n", DISCRSP_IS_SSP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG3((" STP Initiator : %d\n", DISCRSP_IS_STP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG3((" SMP Initiator : %d\n", DISCRSP_IS_SMP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG3((" Phy ID : %d\n", pDiscoverResp->phyIdentifier)); DM_DBG3((" Attached Phy ID: %d\n", pDiscoverResp->attachedPhyIdentifier)); } /* for debugging */ if (oneExpander->discoveringPhyId != pDiscoverResp->phyIdentifier) { DM_DBG1(("dmUpStreamDiscoverExpanderPhy: !!! 
Incorrect SMP response !!!\n")); DM_DBG1(("dmUpStreamDiscoverExpanderPhy: Request PhyID #%d Response PhyID #%d !!!\n", oneExpander->discoveringPhyId, pDiscoverResp->phyIdentifier)); dmhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover_t)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } /* saving routing attribute for non self-configuring expanders */ oneExpander->routingAttribute[pDiscoverResp->phyIdentifier] = (bit8)DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp); if ( oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE ) { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: SA_SAS_DEV_TYPE_FANOUT_EXPANDER\n")); if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { DM_DBG1(("dmUpStreamDiscoverExpanderPhy: **** Topology Error subtractive routing on fanout expander device!!!\n")); /* discovery error */ onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmUpStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* (2.1.3) discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } } else { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: SA_SAS_DEV_TYPE_EDGE_EXPANDER\n")); if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { /* Setup sasIdentify for the attached device */ sasIdentify.phyIdentifier = pDiscoverResp->phyIdentifier; sasIdentify.deviceType_addressFrameType = (bit8)(pDiscoverResp->attachedDeviceType & 0x70); sasIdentify.initiator_ssp_stp_smp = 
pDiscoverResp->attached_Ssp_Stp_Smp_Sata_Initiator; sasIdentify.target_ssp_stp_smp = pDiscoverResp->attached_SataPS_Ssp_Stp_Smp_Sata_Target; *(bit32*)sasIdentify.sasAddressHi = *(bit32*)pDiscoverResp->attachedSasAddressHi; *(bit32*)sasIdentify.sasAddressLo = *(bit32*)pDiscoverResp->attachedSasAddressLo; /* incremental discovery */ dmSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); dmSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); dmSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; attachedSasHi = DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp); attachedSasLo = DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp); /* If the phy has subtractive routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: SA_SAS_ROUTING_SUBTRACTIVE\n")); /* Setup upstream phys */ dmExpanderUpStreamPhyAdd(dmRoot, oneExpander, (bit8) pDiscoverResp->attachedPhyIdentifier); /* If the expander already has an upsteam device set up */ if (oneExpander->hasUpStreamDevice == agTRUE) { /* just to update MCN */ dmPortSASDeviceFind(dmRoot, onePortContext, attachedSasLo, attachedSasHi, oneDeviceData); /* If the sas address doesn't match */ if ( ((oneExpander->upStreamSASAddressHi != attachedSasHi) || (oneExpander->upStreamSASAddressLo != attachedSasLo)) && (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE || DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* TODO: discovery error, callback */ DM_DBG1(("dmUpStreamDiscoverExpanderPhy: **** Topology Error subtractive routing error - inconsistent SAS address!!!\n")); /* call back to notify discovery error */ onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = 
DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmUpStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } else { /* Setup SAS address for up stream device */ oneExpander->hasUpStreamDevice = agTRUE; oneExpander->upStreamSASAddressHi = attachedSasHi; oneExpander->upStreamSASAddressLo = attachedSasLo; if ( (onePortContext->sasLocalAddressHi != attachedSasHi) || (onePortContext->sasLocalAddressLo != attachedSasLo) ) { /* Find the device from the discovered list */ AttachedDevice = dmPortSASDeviceFind(dmRoot, onePortContext, attachedSasLo, attachedSasHi, oneDeviceData); /* New device, If the device has been discovered before */ if ( AttachedDevice != agNULL) /* old device */ { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: Seen This Device Before\n")); /* If attached device is an edge expander */ if ( AttachedDevice->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE) { /* The attached device is an expander */ AttachedExpander = AttachedDevice->dmExpander; /* If the two expanders are the root of the two edge expander sets */ if ( (AttachedExpander->upStreamSASAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo)) && (AttachedExpander->upStreamSASAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo)) ) { /* Setup upstream expander for the pExpander */ oneExpander->dmUpStreamExpander = AttachedExpander; } /* If the two expanders are not the root of the two edge expander sets */ else { /* TODO: loop found, discovery error, callback */ DM_DBG1(("dmUpStreamDiscoverExpanderPhy: **** Topology Error loop detection!!!\n")); 
onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmUpStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* If attached device is not an edge expander */ else { /*TODO: should not happen, ASSERT */ DM_DBG3(("dmUpStreamDiscoverExpanderPhy, *** Attached Device is not Edge. Confused!!!\n")); } } /* AttachedExpander != agNULL */ /* New device, If the device has not been discovered before */ else /* new device */ { /* Add the device */ DM_DBG3(("dmUpStreamDiscoverExpanderPhy: New device\n")); /* read minimum rate from the configuration onePortContext->LinkRate is SPC's local link rate */ connectionRate = (bit8)MIN(onePortContext->LinkRate, DISCRSP_GET_LINKRATE(pDiscoverResp)); DM_DBG3(("dmUpStreamDiscoverExpanderPhy: link rate 0x%x\n", onePortContext->LinkRate)); DM_DBG3(("dmUpStreamDiscoverExpanderPhy: negotiatedPhyLinkRate 0x%x\n", DISCRSP_GET_LINKRATE(pDiscoverResp))); DM_DBG3(("dmUpStreamDiscoverExpanderPhy: connectionRate 0x%x\n", connectionRate)); if (DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { /* incremental discovery */ if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental 
discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } /* DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp) */ else { /* incremental discovery */ if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } /* If the device is added successfully */ if ( AttachedDevice != agNULL) { /* (3.1.2.3.2.3.2.1) callback about new device */ if ( DISCRSP_IS_SSP_TARGET(pDiscoverResp) || DISCRSP_IS_SSP_INITIATOR(pDiscoverResp) || DISCRSP_IS_SMP_INITIATOR(pDiscoverResp) || DISCRSP_IS_SMP_INITIATOR(pDiscoverResp) ) { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: Found SSP/SMP SAS %08x-%08x\n", attachedSasHi, attachedSasLo)); } else { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: Found a SAS STP device.\n")); } /* If the attached device is an expander */ if ( (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) || (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* Allocate an expander data structure */ AttachedExpander = dmDiscoveringExpanderAlloc( dmRoot, onePortContext, AttachedDevice ); 
DM_DBG3(("dmUpStreamDiscoverExpanderPhy: Found expander=%p\n", AttachedExpander)); /* If allocate successfully */ if ( AttachedExpander != agNULL) { /* Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); /* Setup upstream expander for the pExpander */ oneExpander->dmUpStreamExpander = AttachedExpander; } /* If failed to allocate */ else { DM_DBG1(("dmUpStreamDiscoverExpanderPhy: Failed to allocate expander data structure!!!\n")); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* If the attached device is an end device */ else { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: Found end device\n")); /* LP2006-05-26 added upstream device to the newly found device */ AttachedDevice->dmExpander = oneExpander; oneExpander->dmUpStreamExpander = agNULL; } } else { DM_DBG1(("dmUpStreamDiscoverExpanderPhy: Failed to add a device!!!\n")); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* else, new device */ } /* onePortContext->sasLocalAddressLo != attachedSasLo */ } /* else */ } /* DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE */ } /* DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE */ } /* big else */ oneExpander->discoveringPhyId ++; if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: DISCOVERY_UP_STREAM find more ...\n")); /* continue discovery for the next phy */ dmDiscoverSend(dmRoot, oneDeviceData); } else { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: DISCOVERY_UP_STREAM last phy continue upstream..\n")); /* for MCN */ dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData); /* remove the expander from the discovering list */ dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander); /* continue upstream discovering */ dmUpStreamDiscovering(dmRoot, onePortContext, oneDeviceData); } } else { DM_DBG3(("dmUpStreamDiscoverExpanderPhy: 
onePortContext->discovery.status not in DISCOVERY_UP_STREAM; status %d\n", onePortContext->discovery.status)); } DM_DBG3(("dmUpStreamDiscoverExpanderPhy: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } osGLOBAL void dmUpStreamDiscover2ExpanderPhy( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander, smpRespDiscover2_t *pDiscoverResp ) { dmDeviceData_t *oneDeviceData; dmDeviceData_t *AttachedDevice = agNULL; dmExpander_t *AttachedExpander; agsaSASIdentify_t sasIdentify; bit8 connectionRate; bit32 attachedSasHi, attachedSasLo; dmSASSubID_t dmSASSubID; dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: start\n")); if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE) { DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: invalid port or aborted discovery!!!\n")); return; } if (oneExpander != oneExpander->dmDevice->dmExpander) { DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: wrong!!!\n")); } dm_memset(&sasIdentify, 0, sizeof(agsaSASIdentify_t)); oneDeviceData = oneExpander->dmDevice; DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: Phy #%d of SAS %08x-%08x\n", oneExpander->discoveringPhyId, oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG2((" Attached device: %s\n", ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 0 ? "No Device" : (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 1 ? "End Device" : (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 2 ? 
"Edge Expander" : "Fanout Expander"))))); if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { DM_DBG2((" SAS address : %08x-%08x\n", SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp), SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp))); DM_DBG2((" SSP Target : %d\n", SAS2_DISCRSP_IS_SSP_TARGET(pDiscoverResp)?1:0)); DM_DBG2((" STP Target : %d\n", SAS2_DISCRSP_IS_STP_TARGET(pDiscoverResp)?1:0)); DM_DBG2((" SMP Target : %d\n", SAS2_DISCRSP_IS_SMP_TARGET(pDiscoverResp)?1:0)); DM_DBG2((" SATA DEVICE : %d\n", SAS2_DISCRSP_IS_SATA_DEVICE(pDiscoverResp)?1:0)); DM_DBG2((" SSP Initiator : %d\n", SAS2_DISCRSP_IS_SSP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG2((" STP Initiator : %d\n", SAS2_DISCRSP_IS_STP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG2((" SMP Initiator : %d\n", SAS2_DISCRSP_IS_SMP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG2((" Phy ID : %d\n", pDiscoverResp->phyIdentifier)); DM_DBG2((" Attached Phy ID: %d\n", pDiscoverResp->attachedPhyIdentifier)); } if (oneExpander->discoveringPhyId != pDiscoverResp->phyIdentifier) { DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: !!! 
Incorrect SMP response !!!\n")); DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: Request PhyID #%d Response PhyID #%d\n", oneExpander->discoveringPhyId, pDiscoverResp->phyIdentifier)); dmhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover2_t)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } /* saving routing attribute for non self-configuring expanders */ oneExpander->routingAttribute[pDiscoverResp->phyIdentifier] = SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp); if ( oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE ) { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: SA_SAS_DEV_TYPE_FANOUT_EXPANDER\n")); if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: **** Topology Error subtractive routing on fanout expander device!!!\n")); /* discovery error */ onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* (2.1.3) discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } } else { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: SA_SAS_DEV_TYPE_EDGE_EXPANDER\n")); if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { /* Setup sasIdentify for the attached device */ sasIdentify.phyIdentifier = pDiscoverResp->phyIdentifier; sasIdentify.deviceType_addressFrameType = pDiscoverResp->attachedDeviceTypeReason & 0x70; sasIdentify.initiator_ssp_stp_smp = 
pDiscoverResp->attached_Ssp_Stp_Smp_Sata_Initiator; sasIdentify.target_ssp_stp_smp = pDiscoverResp->attached_SataPS_Ssp_Stp_Smp_Sata_Target; *(bit32*)sasIdentify.sasAddressHi = *(bit32*)pDiscoverResp->attachedSasAddressHi; *(bit32*)sasIdentify.sasAddressLo = *(bit32*)pDiscoverResp->attachedSasAddressLo; /* incremental discovery */ dmSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); dmSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); dmSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; attachedSasHi = SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp); attachedSasLo = SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp); /* If the phy has subtractive routing attribute */ if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: SA_SAS_ROUTING_SUBTRACTIVE\n")); /* Setup upstream phys */ dmExpanderUpStreamPhyAdd(dmRoot, oneExpander, (bit8) pDiscoverResp->attachedPhyIdentifier); /* If the expander already has an upsteam device set up */ if (oneExpander->hasUpStreamDevice == agTRUE) { /* just to update MCN */ dmPortSASDeviceFind(dmRoot, onePortContext, attachedSasLo, attachedSasHi, oneDeviceData); /* If the sas address doesn't match */ if ( ((oneExpander->upStreamSASAddressHi != attachedSasHi) || (oneExpander->upStreamSASAddressLo != attachedSasLo)) && (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE || SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* TODO: discovery error, callback */ DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: **** Topology Error subtractive routing error - inconsistent SAS address!!!\n")); /* call back to notify discovery error */ onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); 
onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } else { /* Setup SAS address for up stream device */ oneExpander->hasUpStreamDevice = agTRUE; oneExpander->upStreamSASAddressHi = attachedSasHi; oneExpander->upStreamSASAddressLo = attachedSasLo; if ( (onePortContext->sasLocalAddressHi != attachedSasHi) || (onePortContext->sasLocalAddressLo != attachedSasLo) ) { /* Find the device from the discovered list */ AttachedDevice = dmPortSASDeviceFind(dmRoot, onePortContext, attachedSasLo, attachedSasHi, oneDeviceData); /* If the device has been discovered before */ if ( AttachedDevice != agNULL) { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: Seen This Device Before\n")); /* If attached device is an edge expander */ if ( AttachedDevice->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE) { /* The attached device is an expander */ AttachedExpander = AttachedDevice->dmExpander; /* If the two expanders are the root of the two edge expander sets */ if ( (AttachedExpander->upStreamSASAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo)) && (AttachedExpander->upStreamSASAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo)) ) { /* Setup upstream expander for the pExpander */ oneExpander->dmUpStreamExpander = AttachedExpander; } /* If the two expanders are not the root of the two edge expander sets */ else { /* TODO: loop found, discovery error, callback */ DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: **** Topology Error 
loop detection!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmUpStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* If attached device is not an edge expander */ else { /*TODO: should not happen, ASSERT */ DM_DBG1(("dmUpStreamDiscover2ExpanderPhy, *** Attached Device is not Edge. Confused!!!\n")); } } /* If the device has not been discovered before */ else { /* Add the device */ DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: New device\n")); /* read minimum rate from the configuration onePortContext->LinkRate is SPC's local link rate */ connectionRate = MIN(onePortContext->LinkRate, SAS2_DISCRSP_GET_LOGICAL_LINKRATE(pDiscoverResp)); DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: link rate 0x%x\n", onePortContext->LinkRate)); DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: negotiatedPhyLinkRate 0x%x\n", SAS2_DISCRSP_GET_LINKRATE(pDiscoverResp))); DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: connectionRate 0x%x\n", connectionRate)); //hhhhhhhh if (SAS2_DISCRSP_IS_STP_TARGET(pDiscoverResp) || SAS2_DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { /* incremental discovery */ if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental 
discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } else { /* incremental discovery */ if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } /* If the device is added successfully */ if ( AttachedDevice != agNULL) { /* (3.1.2.3.2.3.2.1) callback about new device */ if ( SAS2_DISCRSP_IS_SSP_TARGET(pDiscoverResp) || SAS2_DISCRSP_IS_SSP_INITIATOR(pDiscoverResp) || SAS2_DISCRSP_IS_SMP_INITIATOR(pDiscoverResp) || SAS2_DISCRSP_IS_SMP_INITIATOR(pDiscoverResp) ) { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: Found SSP/SMP SAS %08x-%08x\n", attachedSasHi, attachedSasLo)); } else { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: Found a SAS STP device.\n")); } /* If the attached device is an expander */ if ( (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) || (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* Allocate an expander data structure */ AttachedExpander = dmDiscoveringExpanderAlloc( dmRoot, onePortContext, AttachedDevice ); DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: Found 
expander=%p\n", AttachedExpander)); /* If allocate successfully */ if ( AttachedExpander != agNULL) { /* Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); /* Setup upstream expander for the pExpander */ oneExpander->dmUpStreamExpander = AttachedExpander; } /* If failed to allocate */ else { DM_DBG1(("dmUpStreamDiscover2ExpanderPhy, Failed to allocate expander data structure!!!\n")); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* If the attached device is an end device */ else { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: Found end device\n")); /* LP2006-05-26 added upstream device to the newly found device */ AttachedDevice->dmExpander = oneExpander; oneExpander->dmUpStreamExpander = agNULL; } } else { DM_DBG1(("dmUpStreamDiscover2ExpanderPhy, Failed to add a device!!!\n")); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } } } } /* substractive routing */ } } oneExpander->discoveringPhyId ++; if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: DISCOVERY_UP_STREAM find more ...\n")); /* continue discovery for the next phy */ dmDiscoverSend(dmRoot, oneDeviceData); } else { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: DISCOVERY_UP_STREAM last phy continue upstream..\n")); /* for MCN */ dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData); /* remove the expander from the discovering list */ dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander); /* continue upstream discovering */ dmUpStreamDiscovering(dmRoot, onePortContext, oneDeviceData); } } else { DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: onePortContext->discovery.status not in DISCOVERY_UP_STREAM; status %d\n", onePortContext->discovery.status)); } DM_DBG2(("dmUpStreamDiscover2ExpanderPhy: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } osGLOBAL void 
dmDownStreamDiscoverExpanderPhy( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander, smpRespDiscover_t *pDiscoverResp ) { agsaSASIdentify_t sasIdentify; dmSASSubID_t dmSASSubID; bit32 attachedSasHi, attachedSasLo; dmExpander_t *AttachedExpander; dmExpander_t *UpStreamExpander; dmExpander_t *ConfigurableExpander = agNULL; bit8 connectionRate, negotiatedPhyLinkRate; bit32 configSASAddressHi; bit32 configSASAddressLo; bit32 dupConfigSASAddr = agFALSE; dmDeviceData_t *oneDeviceData; dmDeviceData_t *AttachedDevice = agNULL; bit32 SAS2SAS11Check = agFALSE; dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; DM_DBG3(("dmDownStreamDiscoverExpanderPhy: start\n")); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); DM_ASSERT(dmRoot, "(dmDownStreamDiscoverExpanderPhy) dmRoot NULL"); DM_ASSERT(onePortContext, "(dmDownStreamDiscoverExpanderPhy) pPort NULL"); DM_ASSERT(oneExpander, "(dmDownStreamDiscoverExpanderPhy) pExpander NULL"); DM_ASSERT(pDiscoverResp, "(dmDownStreamDiscoverExpanderPhy) pDiscoverResp NULL"); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: onePortContxt=%p oneExpander=%p\n", onePortContext, oneExpander)); if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE) { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: invalid port or aborted discovery!!!\n")); return; } if (oneExpander != oneExpander->dmDevice->dmExpander) { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: wrong!!!\n")); } /* (1) Find the device structure of the expander */ oneDeviceData = oneExpander->dmDevice; DM_ASSERT(oneDeviceData, "(dmDownStreamDiscoverExpanderPhy) pDevice NULL"); /* for debugging */ DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Phy #%d of SAS %08x-%08x\n", oneExpander->discoveringPhyId, 
oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG3((" Attached device: %s\n", ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 0 ? "No Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 1 ? "End Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 2 ? "Edge Expander" : "Fanout Expander"))))); /* for debugging */ if (oneExpander->discoveringPhyId != pDiscoverResp->phyIdentifier) { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: !!! Incorrect SMP response !!!\n")); DM_DBG1(("dmDownStreamDiscoverExpanderPhy: Request PhyID #%d Response PhyID #%d !!!\n", oneExpander->discoveringPhyId, pDiscoverResp->phyIdentifier)); dmhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover_t)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { DM_DBG3((" SAS address : %08x-%08x\n", DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp), DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp))); DM_DBG3((" SSP Target : %d\n", DISCRSP_IS_SSP_TARGET(pDiscoverResp)?1:0)); DM_DBG3((" STP Target : %d\n", DISCRSP_IS_STP_TARGET(pDiscoverResp)?1:0)); DM_DBG3((" SMP Target : %d\n", DISCRSP_IS_SMP_TARGET(pDiscoverResp)?1:0)); DM_DBG3((" SATA DEVICE : %d\n", DISCRSP_IS_SATA_DEVICE(pDiscoverResp)?1:0)); DM_DBG3((" SSP Initiator : %d\n", DISCRSP_IS_SSP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG3((" STP Initiator : %d\n", DISCRSP_IS_STP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG3((" SMP Initiator : %d\n", DISCRSP_IS_SMP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG3((" Phy ID : %d\n", pDiscoverResp->phyIdentifier)); DM_DBG3((" Attached Phy ID: %d\n", pDiscoverResp->attachedPhyIdentifier)); } /* end for debugging */ /* saving routing attribute for non self-configuring expanders */ oneExpander->routingAttribute[pDiscoverResp->phyIdentifier] = DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp); oneExpander->discoverSMPAllowed = agTRUE; /* If a device is attached */ if ( 
DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { /* Setup sasIdentify for the attached device */ sasIdentify.phyIdentifier = pDiscoverResp->phyIdentifier; sasIdentify.deviceType_addressFrameType = pDiscoverResp->attachedDeviceType & 0x70; sasIdentify.initiator_ssp_stp_smp = pDiscoverResp->attached_Ssp_Stp_Smp_Sata_Initiator; sasIdentify.target_ssp_stp_smp = pDiscoverResp->attached_SataPS_Ssp_Stp_Smp_Sata_Target; *(bit32*)sasIdentify.sasAddressHi = *(bit32*)pDiscoverResp->attachedSasAddressHi; *(bit32*)sasIdentify.sasAddressLo = *(bit32*)pDiscoverResp->attachedSasAddressLo; /* incremental discovery */ dmSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); dmSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); dmSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; attachedSasHi = DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp); attachedSasLo = DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp); /* If it's a direct routing */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_DIRECT) { /* If the attached device is an expander */ if ( (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) || (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) ) { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error direct routing can't connect to expander!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, 
onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } } /* If the expander's attached device is not myself */ if ( (attachedSasHi != onePortContext->sasLocalAddressHi) || (attachedSasLo != onePortContext->sasLocalAddressLo) ) { /* Find the attached device from discovered list */ AttachedDevice = dmPortSASDeviceFind(dmRoot, onePortContext, attachedSasLo, attachedSasHi, oneDeviceData); /* If the device has not been discovered before */ if ( AttachedDevice == agNULL) //11 { /* If the phy has subtractive routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE && (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE || DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscoverExpanderPhy: Deferred!!! **** Topology Error subtractive routing error - inconsistent SAS address!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); onePortContext->discovery.DeferredError = agTRUE; } else /* 11 */ { /* Add the device */ /* read minimum rate from the configuration onePortContext->LinkRate is SPC's local link rate */ connectionRate = MIN(onePortContext->LinkRate, DISCRSP_GET_LINKRATE(pDiscoverResp)); 
DM_DBG3(("dmDownStreamDiscoverExpanderPhy: link rate 0x%x\n", DEVINFO_GET_LINKRATE(&oneDeviceData->agDeviceInfo))); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: negotiatedPhyLinkRate 0x%x\n", DISCRSP_GET_LINKRATE(pDiscoverResp))); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: connectionRate 0x%x\n", connectionRate)); if (DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } /* DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp) */ else /* 22 */ { if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } /* else 22 */ DM_DBG3(("dmDownStreamDiscoverExpanderPhy: newDevice pDevice=%p\n", AttachedDevice)); /* If the device is added successfully 
*/ if ( AttachedDevice != agNULL) { if ( SA_IDFRM_IS_SSP_TARGET(&sasIdentify) || SA_IDFRM_IS_SMP_TARGET(&sasIdentify) || SA_IDFRM_IS_SSP_INITIATOR(&sasIdentify) || SA_IDFRM_IS_SMP_INITIATOR(&sasIdentify) ) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Report a new SAS device !!\n")); } else { if ( SA_IDFRM_IS_STP_TARGET(&sasIdentify) || SA_IDFRM_IS_SATA_DEVICE(&sasIdentify) ) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Found an STP or SATA device.\n")); } else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Found Other type of device.\n")); } } /* LP2006-05-26 added upstream device to the newly found device */ AttachedDevice->dmExpander = oneExpander; DM_DBG3(("dmDownStreamDiscoverExpanderPhy: AttachedDevice %p did %d\n", AttachedDevice, AttachedDevice->id)); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Attached oneExpander %p did %d\n", AttachedDevice->dmExpander, AttachedDevice->dmExpander->id)); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: oneDeviceData %p did %d\n", oneDeviceData, oneDeviceData->id)); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: oneExpander %p did %d\n", oneDeviceData->dmExpander, oneDeviceData->dmExpander->id)); /* If the phy has table routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) { /* If the attached device is a fan out expander */ if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error two table routing phys are connected!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", 
onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) { /* Allocate an expander data structure */ AttachedExpander = dmDiscoveringExpanderAlloc(dmRoot, onePortContext, AttachedDevice); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Found a EDGE exp device.%p\n", AttachedExpander)); /* If allocate successfully */ if ( AttachedExpander != agNULL) { /* set up downstream information on configurable expander */ dmExpanderDownStreamPhyAdd(dmRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); /* Setup upstream information */ dmExpanderUpStreamPhyAdd(dmRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); AttachedExpander->hasUpStreamDevice = agTRUE; AttachedExpander->upStreamSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); AttachedExpander->upStreamSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); AttachedExpander->dmUpStreamExpander = oneExpander; /* (2.3.2.2.2.2.2.2.2) Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); } /* If failed to allocate */ else { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: Failed to allocate expander data structure!!!\n")); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } } /* DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE */ /* If status is still DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 1st before\n")); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->dmUpStreamExpander; ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, 
oneExpander); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); if (ConfigurableExpander) { if ( (ConfigurableExpander->dmDevice->SASAddressID.sasAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo)) && (ConfigurableExpander->dmDevice->SASAddressID.sasAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo)) ) { /* directly attached between oneExpander and ConfigurableExpander */ DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 1st before loc 1\n")); configSASAddressHi = oneExpander->dmDevice->SASAddressID.sasAddressHi; configSASAddressLo = oneExpander->dmDevice->SASAddressID.sasAddressLo; } else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 1st before loc 2\n")); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); } } /* if !ConfigurableExpander */ dupConfigSASAddr = dmDuplicateConfigSASAddr(dmRoot, ConfigurableExpander, configSASAddressHi, configSASAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 1st q123\n")); UpStreamExpander->dmCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = dmFindCurrentDownStreamPhyIndex(dmRoot, ConfigurableExpander); ConfigurableExpander->dmReturnginExpander = oneExpander; dmRoutingEntryAdd(dmRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], configSASAddressHi, configSASAddressLo ); } } /* onePortContext->discovery.status == DISCOVERY_DOWN_STREAM */ } /* AttachedDevice != agNULL */ /* If fail to add the device */ else { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: Failed to add a device!!!\n")); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* else 11 */ } /* AttachedDevice == agNULL */ /* If the device 
has been discovered before */ else /* haha discovered before 33 */ { /* If the phy has subtractive routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { /* If the expander doesn't have up stream device */ if ( oneExpander->hasUpStreamDevice == agFALSE) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error loop, or end device connects to two expanders!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } /* If the expander has up stream device */ else /* 44 */ { /* If sas address doesn't match */ if ( (oneExpander->upStreamSASAddressHi != attachedSasHi) || (oneExpander->upStreamSASAddressLo != attachedSasLo) ) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error two subtractive phys!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", 
onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* else 44 */ } /* DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE */ /* If the phy has table routing attribute */ else if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) { /* If the attached device is a fan out expander */ if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) { /* (2.3.3.2.1.1) TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error fan out expander to routing table phy!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } /* If the attached device is an edge expander */ else if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) { /* Setup up stream inform */ AttachedExpander = AttachedDevice->dmExpander; DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Found edge expander=%p\n", AttachedExpander)); /* If the attached expander has up stream device */ if ( AttachedExpander->hasUpStreamDevice == agTRUE) { /* compare the sas address */ if ( (AttachedExpander->upStreamSASAddressHi != 
DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo)) || (AttachedExpander->upStreamSASAddressLo != DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo))) { /* TODO: discovery error, callback */ SAS2SAS11Check = dmSAS2SAS11ErrorCheck(dmRoot, onePortContext, AttachedExpander, oneExpander, oneExpander); if (SAS2SAS11Check == agTRUE) { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error SAS2 and SAS1.1!!!\n")); } else { DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error two table routing phys connected (1)!!!\n")); } onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Add edge expander=%p\n", AttachedExpander)); /* set up downstream information on configurable expander */ dmExpanderDownStreamPhyAdd(dmRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); /* haha */ dmExpanderUpStreamPhyAdd(dmRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); /* Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); } } /* AttachedExpander->hasUpStreamDevice == agTRUE */ /* If the attached expander doesn't have up stream device */ else { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscoverExpanderPhy: **** Topology Error two table routing phys connected 
(2)!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscoverExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE */ } /* DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE */ /* do this regradless of sub or table */ /* If status is still DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 2nd before\n")); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->dmUpStreamExpander; ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, oneExpander); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); if (ConfigurableExpander) { if ( (ConfigurableExpander->dmDevice->SASAddressID.sasAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo)) && (ConfigurableExpander->dmDevice->SASAddressID.sasAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo)) ) { /* directly attached between oneExpander and ConfigurableExpander */ DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 2nd before loc 1\n")); configSASAddressHi = oneExpander->dmDevice->SASAddressID.sasAddressHi; configSASAddressLo = 
oneExpander->dmDevice->SASAddressID.sasAddressLo; } else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 2nd before loc 2\n")); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); } } /* if !ConfigurableExpander */ dupConfigSASAddr = dmDuplicateConfigSASAddr(dmRoot, ConfigurableExpander, configSASAddressHi, configSASAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 2nd q123 \n")); UpStreamExpander->dmCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = dmFindCurrentDownStreamPhyIndex(dmRoot, ConfigurableExpander); ConfigurableExpander->dmReturnginExpander = oneExpander; dmRoutingEntryAdd(dmRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], configSASAddressHi, configSASAddressLo ); } } /* onePortContext->discovery.status == DISCOVERY_DOWN_STREAM */ /* incremental discovery */ if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_INCREMENTAL_START) { connectionRate = MIN(onePortContext->LinkRate, DISCRSP_GET_LINKRATE(pDiscoverResp)); if (DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: incremental SATA_STP\n")); dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: incremental SAS\n")); dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } /* onePortContext->discovery.type == DM_DISCOVERY_OPTION_INCREMENTAL_START */ } /* else 33 */ } /* (attachedSasLo != onePortContext->sasLocalAddressLo) */ else /* 
else 44 */ { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: Found Self\n")); DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 3rd before\n")); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->dmUpStreamExpander; ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, oneExpander); dupConfigSASAddr = dmDuplicateConfigSASAddr(dmRoot, ConfigurableExpander, onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: 3rd q123 Setup routing table\n")); UpStreamExpander->dmCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = dmFindCurrentDownStreamPhyIndex(dmRoot, ConfigurableExpander); ConfigurableExpander->dmReturnginExpander = oneExpander; dmRoutingEntryAdd(dmRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo ); } } /* else 44 */ } /* DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE */ /* If no device is attached */ else { DM_DBG2(("!!!!!!!!!!!!!!!!!!!!! 
SPIN SATA !!!!!!!!!!!!!!!!!!!!!!!!!!!\n")); negotiatedPhyLinkRate = DISCRSP_GET_LINKRATE(pDiscoverResp); // added by thenil if (negotiatedPhyLinkRate == 0x03) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: SPIN SATA sent reset\n")); dmPhyControlSend(dmRoot, oneDeviceData, SMP_PHY_CONTROL_HARD_RESET, pDiscoverResp->phyIdentifier ); } /* do nothing */ } /* Increment the discovering phy id */ oneExpander->discoveringPhyId ++; /* If the discovery status is DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM ) { /* If not the last phy */ if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: More Phys to discover\n")); /* continue discovery for the next phy */ dmDiscoverSend(dmRoot, oneDeviceData); } /* If the last phy */ else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: No More Phys\n")); /* for MCN */ dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData); /* remove the expander from the discovering list */ dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander); /* continue downstream discovering */ dmDownStreamDiscovering(dmRoot, onePortContext, oneDeviceData); } } else { DM_DBG3(("dmDownStreamDiscoverExpanderPhy: onePortContext->discovery.status not in DISCOVERY_DOWN_STREAM; status %d\n", onePortContext->discovery.status)); } DM_DBG3(("dmDownStreamDiscoverExpanderPhy: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } /* works at SAS2 expander (called in dmDownStreamDiscover2ExpanderPhy()) if currentExpander is SAS2, called in dmDownStreamDiscover2ExpanderPhy() if currentExpander is SAS1.1, called in dmDownStreamDiscoverExpanderPhy() */ osGLOBAL bit32 dmSAS2SAS11ErrorCheck( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *topExpander, dmExpander_t *bottomExpander, dmExpander_t *currentExpander ) { bit32 result = agFALSE, i = 0; bit8 downStreamPhyID, upStreamPhyID; DM_DBG2(("dmSAS2SAS11ErrorCheck: start\n")); if (topExpander == 
agNULL) { DM_DBG2(("dmSAS2SAS11ErrorCheck: topExpander is NULL\n")); return result; } if (bottomExpander == agNULL) { DM_DBG2(("dmSAS2SAS11ErrorCheck: bottomExpander is NULL\n")); return result; } if (currentExpander == agNULL) { DM_DBG2(("dmSAS2SAS11ErrorCheck: currentExpander is NULL\n")); return result; } DM_DBG2(("dmSAS2SAS11ErrorCheck: topExpander addrHi 0x%08x addrLo 0x%08x\n", topExpander->dmDevice->SASAddressID.sasAddressHi, topExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG2(("dmSAS2SAS11ErrorCheck: bottomExpander addrHi 0x%08x addrLo 0x%08x\n", bottomExpander->dmDevice->SASAddressID.sasAddressHi, bottomExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG2(("dmSAS2SAS11ErrorCheck: currentExpander addrHi 0x%08x addrLo 0x%08x\n", currentExpander->dmDevice->SASAddressID.sasAddressHi, currentExpander->dmDevice->SASAddressID.sasAddressLo)); for (i=0;idownStreamPhys[i]; upStreamPhyID = bottomExpander->upStreamPhys[i]; if (currentExpander->SAS2 == 1) { if ( downStreamPhyID == upStreamPhyID && topExpander->routingAttribute[downStreamPhyID] == SAS_ROUTING_TABLE && bottomExpander->routingAttribute[i] == SAS_ROUTING_SUBTRACTIVE && topExpander->SAS2 == 0 && bottomExpander->SAS2 == 1 ) { result = agTRUE; break; } } else if (currentExpander->SAS2 == 0) { if ( downStreamPhyID == upStreamPhyID && topExpander->routingAttribute[downStreamPhyID] == SAS_ROUTING_SUBTRACTIVE && bottomExpander->routingAttribute[i] == SAS_ROUTING_TABLE && topExpander->SAS2 == 1 && bottomExpander->SAS2 == 0 ) { result = agTRUE; break; } } } return result; } osGLOBAL void dmDownStreamDiscover2ExpanderPhy( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander, smpRespDiscover2_t *pDiscoverResp ) { dmDeviceData_t *oneDeviceData; dmExpander_t *UpStreamExpander; dmDeviceData_t *AttachedDevice = agNULL; dmExpander_t *AttachedExpander; agsaSASIdentify_t sasIdentify; bit8 connectionRate; bit32 attachedSasHi, attachedSasLo; dmSASSubID_t dmSASSubID; dmExpander_t 
*ConfigurableExpander = agNULL; bit32 dupConfigSASAddr = agFALSE; bit32 configSASAddressHi; bit32 configSASAddressLo; bit32 SAS2SAS11Check = agFALSE; dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: start\n")); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); DM_ASSERT(dmRoot, "(dmDownStreamDiscover2ExpanderPhy) dmRoot NULL"); DM_ASSERT(onePortContext, "(dmDownStreamDiscover2ExpanderPhy) pPort NULL"); DM_ASSERT(oneExpander, "(dmDownStreamDiscover2ExpanderPhy) pExpander NULL"); DM_ASSERT(pDiscoverResp, "(dmDownStreamDiscover2ExpanderPhy) pDiscoverResp NULL"); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: onePortContxt=%p oneExpander=%p oneDeviceData=%p\n", onePortContext, oneExpander, oneExpander->dmDevice)); if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: invalid port or aborted discovery!!!\n")); return; } if (oneExpander != oneExpander->dmDevice->dmExpander) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: wrong!!!\n")); } /* (1) Find the device structure of the expander */ oneDeviceData = oneExpander->dmDevice; DM_ASSERT(oneDeviceData, "(dmDownStreamDiscover2ExpanderPhy) pDevice NULL"); /* for debugging */ DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Phy #%d of SAS %08x-%08x\n", oneExpander->discoveringPhyId, oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG2((" Attached device: %s\n", ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 0 ? "No Device" : (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 1 ? "End Device" : (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 2 ? 
"Edge Expander" : "Fanout Expander"))))); /* for debugging */ if (oneExpander->discoveringPhyId != pDiscoverResp->phyIdentifier) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: !!! Incorrect SMP response !!!\n")); DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: Request PhyID #%d Response PhyID #%d\n", oneExpander->discoveringPhyId, pDiscoverResp->phyIdentifier)); dmhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover2_t)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { DM_DBG2((" SAS address : %08x-%08x\n", SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp), SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp))); DM_DBG2((" SSP Target : %d\n", SAS2_DISCRSP_IS_SSP_TARGET(pDiscoverResp)?1:0)); DM_DBG2((" STP Target : %d\n", SAS2_DISCRSP_IS_STP_TARGET(pDiscoverResp)?1:0)); DM_DBG2((" SMP Target : %d\n", SAS2_DISCRSP_IS_SMP_TARGET(pDiscoverResp)?1:0)); DM_DBG2((" SATA DEVICE : %d\n", SAS2_DISCRSP_IS_SATA_DEVICE(pDiscoverResp)?1:0)); DM_DBG2((" SSP Initiator : %d\n", SAS2_DISCRSP_IS_SSP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG2((" STP Initiator : %d\n", SAS2_DISCRSP_IS_STP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG2((" SMP Initiator : %d\n", SAS2_DISCRSP_IS_SMP_INITIATOR(pDiscoverResp)?1:0)); DM_DBG2((" Phy ID : %d\n", pDiscoverResp->phyIdentifier)); DM_DBG2((" Attached Phy ID: %d\n", pDiscoverResp->attachedPhyIdentifier)); } /* saving routing attribute for non self-configuring expanders */ oneExpander->routingAttribute[pDiscoverResp->phyIdentifier] = SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp); oneExpander->discoverSMPAllowed = agTRUE; /* If a device is attached */ if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { /* Setup sasIdentify for the attached device */ sasIdentify.phyIdentifier = pDiscoverResp->phyIdentifier; sasIdentify.deviceType_addressFrameType = pDiscoverResp->attachedDeviceTypeReason & 0x70; sasIdentify.initiator_ssp_stp_smp = 
pDiscoverResp->attached_Ssp_Stp_Smp_Sata_Initiator; sasIdentify.target_ssp_stp_smp = pDiscoverResp->attached_SataPS_Ssp_Stp_Smp_Sata_Target; *(bit32*)sasIdentify.sasAddressHi = *(bit32*)pDiscoverResp->attachedSasAddressHi; *(bit32*)sasIdentify.sasAddressLo = *(bit32*)pDiscoverResp->attachedSasAddressLo; /* incremental discovery */ dmSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); dmSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); dmSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; attachedSasHi = SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp); attachedSasLo = SAS2_DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp); /* If it's a direct routing */ if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_DIRECT) { /* If the attached device is an expander */ if ( (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) || (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) ) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error direct routing can't connect to expander!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } } /* If the expander's attached device is not myself */ if ( (attachedSasHi != 
onePortContext->sasLocalAddressHi) || (attachedSasLo != onePortContext->sasLocalAddressLo) ) { /* Find the attached device from discovered list */ AttachedDevice = dmPortSASDeviceFind(dmRoot, onePortContext, attachedSasLo, attachedSasHi, oneDeviceData); /* If the device has not been discovered before */ if ( AttachedDevice == agNULL) //11 { //qqqqqq if (0) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error subtractive routing error - inconsistent SAS address!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else { /* Add the device */ /* read minimum rate from the configuration onePortContext->LinkRate is SPC's local link rate */ connectionRate = MIN(onePortContext->LinkRate, SAS2_DISCRSP_GET_LOGICAL_LINKRATE(pDiscoverResp)); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: link rate 0x%x\n", DEVINFO_GET_LINKRATE(&oneDeviceData->agDeviceInfo))); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: negotiatedPhyLinkRate 0x%x\n", SAS2_DISCRSP_GET_LINKRATE(pDiscoverResp))); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: connectionRate 0x%x\n", connectionRate)); if (SAS2_DISCRSP_IS_STP_TARGET(pDiscoverResp) || SAS2_DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, 
agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } else { if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = dmFindRegNValid( dmRoot, onePortContext, &dmSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } } DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: newDevice pDevice=%p\n", AttachedDevice)); /* If the device is added successfully */ if ( AttachedDevice != agNULL) { if ( SA_IDFRM_IS_SSP_TARGET(&sasIdentify) || SA_IDFRM_IS_SMP_TARGET(&sasIdentify) || SA_IDFRM_IS_SSP_INITIATOR(&sasIdentify) || SA_IDFRM_IS_SMP_INITIATOR(&sasIdentify) ) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Report a new SAS device !!\n")); } else { if ( SA_IDFRM_IS_STP_TARGET(&sasIdentify) || SA_IDFRM_IS_SATA_DEVICE(&sasIdentify) ) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Found an STP or SATA device.\n")); } else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Found Other type of device.\n")); } } /* LP2006-05-26 added upstream device to the newly found device */ AttachedDevice->dmExpander = 
oneExpander; DM_DBG3(("dmDownStreamDiscover2ExpanderPhy: AttachedDevice %p did %d\n", AttachedDevice, AttachedDevice->id)); DM_DBG3(("dmDownStreamDiscover2ExpanderPhy: Attached oneExpander %p did %d\n", AttachedDevice->dmExpander, AttachedDevice->dmExpander->id)); DM_DBG3(("dmDownStreamDiscover2ExpanderPhy: oneDeviceData %p did %d\n", oneDeviceData, oneDeviceData->id)); DM_DBG3(("dmDownStreamDiscover2ExpanderPhy: oneExpander %p did %d\n", oneDeviceData->dmExpander, oneDeviceData->dmExpander->id)); /* If the phy has table routing attribute */ if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) { /* If the attached device is a fan out expander */ if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error two table routing phys are connected!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) { /* Allocate an expander data structure */ AttachedExpander = dmDiscoveringExpanderAlloc(dmRoot, onePortContext, AttachedDevice); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Found a EDGE exp device.%p\n", AttachedExpander)); /* If allocate successfully */ if ( 
AttachedExpander != agNULL) { /* set up downstream information on configurable expander */ dmExpanderDownStreamPhyAdd(dmRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); /* Setup upstream information */ dmExpanderUpStreamPhyAdd(dmRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); //qqqqq AttachedExpander->hasUpStreamDevice = agTRUE; AttachedExpander->upStreamSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); AttachedExpander->upStreamSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); AttachedExpander->dmUpStreamExpander = oneExpander; /* (2.3.2.2.2.2.2.2.2) Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); } /* If failed to allocate */ else { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy, Failed to allocate expander data structure!!!\n")); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } } //qqqqq else if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE && (SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE || SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) ) { /* Allocate an expander data structure */ AttachedExpander = dmDiscoveringExpanderAlloc(dmRoot, onePortContext, AttachedDevice); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Found a EDGE/FANOUT exp device.%p\n", AttachedExpander)); /* If allocate successfully */ if ( AttachedExpander != agNULL) { /* set up downstream information on configurable expander */ dmExpanderDownStreamPhyAdd(dmRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); /* Setup upstream information */ dmExpanderUpStreamPhyAdd(dmRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); AttachedExpander->hasUpStreamDevice = agTRUE; AttachedExpander->upStreamSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); AttachedExpander->upStreamSASAddressLo = 
DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); AttachedExpander->dmUpStreamExpander = oneExpander; /* (2.3.2.2.2.2.2.2.2) Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); } /* If failed to allocate */ else { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy, Failed to allocate expander data structure (2)!!!\n")); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } /* If status is still DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM && onePortContext->discovery.ConfiguresOthers == agFALSE) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 1st before\n")); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->dmUpStreamExpander; ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, oneExpander); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); if (ConfigurableExpander) { if ( (ConfigurableExpander->dmDevice->SASAddressID.sasAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo)) && (ConfigurableExpander->dmDevice->SASAddressID.sasAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo)) ) { /* directly attached between oneExpander and ConfigurableExpander */ DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 1st before loc 1\n")); configSASAddressHi = oneExpander->dmDevice->SASAddressID.sasAddressHi; configSASAddressLo = oneExpander->dmDevice->SASAddressID.sasAddressLo; } else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 1st before loc 2\n")); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); } } /* if !ConfigurableExpander */ dupConfigSASAddr = dmDuplicateConfigSASAddr(dmRoot, ConfigurableExpander, configSASAddressHi, configSASAddressLo ); if ( 
ConfigurableExpander && dupConfigSASAddr == agFALSE) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 1st q123\n")); UpStreamExpander->dmCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = dmFindCurrentDownStreamPhyIndex(dmRoot, ConfigurableExpander); ConfigurableExpander->dmReturnginExpander = oneExpander; dmRoutingEntryAdd(dmRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], configSASAddressHi, configSASAddressLo ); } } } /* If fail to add the device */ else { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy, Failed to add a device!!!\n")); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } } /* If the device has been discovered before */ else /* discovered before */ { /* If the phy has subtractive routing attribute */ if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { /* If the expander doesn't have up stream device */ if ( oneExpander->hasUpStreamDevice == agFALSE) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error loop, or end device connects to two expanders!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } /* If the expander has up stream device */ else { //qqqqq /* If sas address 
doesn't match */ if ( (oneExpander->upStreamSASAddressHi != attachedSasHi) || (oneExpander->upStreamSASAddressLo != attachedSasLo) ) { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** two subtractive phys!!! Allowed in SAS2!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; onePortContext->discovery.DeferredError = agTRUE; } } } /* If the phy has table routing attribute */ else if ( SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) { /* If the attached device is a fan out expander */ if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) { /* (2.3.3.2.1.1) TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error fan out expander to routing table phy!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } /* If the attached device is an edge expander */ else if ( SAS2_DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) { /* Setup up stream inform */ 
AttachedExpander = AttachedDevice->dmExpander; DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Found edge expander=%p\n", AttachedExpander)); //hhhhhh /* If the attached expander has up stream device */ if ( AttachedExpander->hasUpStreamDevice == agTRUE) { /* compare the sas address */ if ( (AttachedExpander->upStreamSASAddressHi != DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo)) || (AttachedExpander->upStreamSASAddressLo != DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo))) { if (AttachedExpander->TTTSupported && oneExpander->TTTSupported) { /* needs further error checking UpstreamExpanderOfAttachedExpander = AttachedExpander->UpStreamExpander for (i=0;idownStreamPhys[i] != 0 && } */ SAS2SAS11Check = dmSAS2SAS11ErrorCheck(dmRoot, onePortContext, AttachedExpander->dmUpStreamExpander, AttachedExpander, oneExpander); if (SAS2SAS11Check == agTRUE) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error SAS2 and SAS1.1!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: Allowed Table to Table (1)\n")); /* move on to the next phys but should be not proceed after oneExpander */ oneExpander->UndoDueToTTTSupported = agTRUE; onePortContext->discovery.DeferredError = agFALSE; } } else { /* TODO: discovery error, callback */ 
DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error two table routing phys connected (1)!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Add edge expander=%p\n", AttachedExpander)); /* set up downstream information on configurable expander */ dmExpanderDownStreamPhyAdd(dmRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); /* haha */ dmExpanderUpStreamPhyAdd(dmRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); /* Add the pAttachedExpander to discovering list */ dmDiscoveringExpanderAdd(dmRoot, onePortContext, AttachedExpander); } } /* If the attached expander doesn't have up stream device */ else { if (AttachedExpander->TTTSupported && oneExpander->TTTSupported) { DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: Allowed Table to Table (2)\n")); /* move on to the next phys but should be not proceed after oneExpander */ oneExpander->UndoDueToTTTSupported = agTRUE; onePortContext->discovery.DeferredError = agFALSE; } else { /* TODO: discovery error, callback */ DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: **** Topology Error two table routing phys connected (2)!!!\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); 
onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; DM_DBG1(("dmDownStreamDiscover2ExpanderPhy: sasAddressHi 0x%08x sasAddressLo 0x%08x phyid 0x%x\n", onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi, onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo, onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier)); /* discovery done */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } } } } /* for else if (SAS2_DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) */ /* do this regradless of sub or table */ /* If status is still DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM && onePortContext->discovery.ConfiguresOthers == agFALSE) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 2nd before\n")); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->dmUpStreamExpander; ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, oneExpander); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); if (ConfigurableExpander) { if ( (ConfigurableExpander->dmDevice->SASAddressID.sasAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo)) && (ConfigurableExpander->dmDevice->SASAddressID.sasAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo)) ) { /* directly attached between oneExpander and ConfigurableExpander */ DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 2nd before loc 1\n")); configSASAddressHi = oneExpander->dmDevice->SASAddressID.sasAddressHi; configSASAddressLo = oneExpander->dmDevice->SASAddressID.sasAddressLo; } else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 2nd before loc 2\n")); configSASAddressHi = 
DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); } } /* if !ConfigurableExpander */ dupConfigSASAddr = dmDuplicateConfigSASAddr(dmRoot, ConfigurableExpander, configSASAddressHi, configSASAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 2nd q123 \n")); UpStreamExpander->dmCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = dmFindCurrentDownStreamPhyIndex(dmRoot, ConfigurableExpander); ConfigurableExpander->dmReturnginExpander = oneExpander; dmRoutingEntryAdd(dmRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], configSASAddressHi, configSASAddressLo ); } } /* if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) */ /* incremental discovery */ if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_INCREMENTAL_START) { connectionRate = MIN(onePortContext->LinkRate, SAS2_DISCRSP_GET_LOGICAL_LINKRATE(pDiscoverResp)); if (SAS2_DISCRSP_IS_STP_TARGET(pDiscoverResp) || SAS2_DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: incremental SATA_STP\n")); dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, STP_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: incremental SAS\n")); dmPortSASDeviceAdd( dmRoot, onePortContext, sasIdentify, agFALSE, connectionRate, dmAllShared->itNexusTimeout, 0, SAS_DEVICE_TYPE, oneDeviceData, oneExpander, pDiscoverResp->phyIdentifier ); } } }/* else; existing devce */ } /* not attached to myself */ /* If the attached device is myself */ else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Found Self\n")); DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 3rd before\n")); dmDumpAllUpExp(dmRoot, onePortContext, 
oneExpander); if (onePortContext->discovery.ConfiguresOthers == agFALSE) { UpStreamExpander = oneExpander->dmUpStreamExpander; ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, oneExpander); dupConfigSASAddr = dmDuplicateConfigSASAddr(dmRoot, ConfigurableExpander, onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: 3rd q123 Setup routing table\n")); UpStreamExpander->dmCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = dmFindCurrentDownStreamPhyIndex(dmRoot, ConfigurableExpander); ConfigurableExpander->dmReturnginExpander = oneExpander; dmRoutingEntryAdd(dmRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo ); } } } } /* If no device is attached */ else { } /* Increment the discovering phy id */ oneExpander->discoveringPhyId ++; /* If the discovery status is DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM ) { /* If not the last phy */ if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: More Phys to discover\n")); /* continue discovery for the next phy */ dmDiscoverSend(dmRoot, oneDeviceData); } /* If the last phy */ else { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: No More Phys\n")); /* for MCN */ dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData); ConfigurableExpander = dmFindConfigurableExp(dmRoot, onePortContext, oneExpander); if (oneExpander->UndoDueToTTTSupported == agTRUE && ConfigurableExpander != agNULL) // if (oneExpander->UndoDueToTTTSupported == agTRUE) { DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: Not sure!!!\n")); dmDiscoveringUndoAdd(dmRoot, onePortContext, oneExpander); oneExpander->UndoDueToTTTSupported = agFALSE; } /* remove the 
expander from the discovering list */
      dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander);
      /* continue downstream discovering */
      dmDownStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
    }
  }
  else
  {
    DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: onePortContext->discovery.status not in DISCOVERY_DOWN_STREAM; status %d\n", onePortContext->discovery.status));
  }
  DM_DBG2(("dmDownStreamDiscover2ExpanderPhy: end return phyID#%d\n", oneExpander->discoveringPhyId - 1));
  return;
}

/*
 * dmDiscoveringUndoAdd
 *
 * Walks the port's discoveringExpanderList and, for every expander whose
 * upstream expander is 'oneExpander', dequeues it from the discovering list
 * and re-queues it on dmAllShared->mainExpanderList (under DM_EXPANDER_LOCK).
 * Used to undo child expanders queued for discovery when the parent is being
 * backed out (see the TTT-supported undo path in the downstream discovery
 * code). Returns early if the discovering list is already empty or a list
 * node unexpectedly yields a NULL expander.
 */
osGLOBAL void
dmDiscoveringUndoAdd(
                     dmRoot_t            *dmRoot,
                     dmIntPortContext_t  *onePortContext,
                     dmExpander_t        *oneExpander
                    )
{
  dmIntRoot_t         *dmIntRoot  = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t      *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmList_t            *ExpanderList;
  dmExpander_t        *tempExpander;
  dmIntPortContext_t  *tmpOnePortContext = onePortContext;

  DM_DBG2(("dmDiscoveringUndoAdd: start\n"));
  if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
  {
    DM_DBG2(("dmDiscoveringUndoAdd: empty discoveringExpanderList\n"));
    return;
  }
  // DM_DBG2(("dmDiscoveringUndoAdd: before\n"));
  // dmDumpAllExp(dmRoot, onePortContext, oneExpander);
  ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
  while (ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList))
  {
    tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList);
    if ( tempExpander == agNULL)
    {
      DM_DBG1(("dmDiscoveringUndoAdd: tempExpander is NULL!!!\n"));
      return;
    }
    if (tempExpander->dmUpStreamExpander == oneExpander)
    {
      DM_DBG2(("dmDiscoveringUndoAdd: match!!! expander id %d\n", tempExpander->id));
      DM_DBG2(("dmDiscoveringUndoAdd: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi));
      DM_DBG2(("dmDiscoveringUndoAdd: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo));
      /* move the child off the discovering list back to the main list */
      tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK);
      DMLIST_DEQUEUE_THIS(&(tempExpander->linkNode));
      // DMLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(dmAllShared->freeExpanderList));
      DMLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(dmAllShared->mainExpanderList));
      tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
      /* restart the scan from the head since the list was modified */
      ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
    }
    if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
    {
      DM_DBG2(("dmDiscoveringUndoAdd: hitting break\n"));
      break;
    }
    /*
     * NOTE(review): after a match resets ExpanderList to the head's flink,
     * this unconditional advance steps past that first entry, so a matching
     * node at the head of the rescanned list would be skipped on this pass —
     * confirm this is intentional.
     */
    ExpanderList = ExpanderList->flink;
  }
  // DM_DBG2(("dmDiscoveringUndoAdd: after\n"));
  // dmDumpAllExp(dmRoot, onePortContext, oneExpander);
  return;
}

/*
 * dmHandleZoneViolation
 *
 * Handles an SMP zone-violation response for 'oneDeviceData' during
 * discovery: validates that the port/discovery is still active, updates
 * adjacent-device MCN accounting, removes the expander from the discovering
 * list, and resumes discovery in whichever direction (upstream or
 * downstream) the port's discovery state machine is in.
 * 'agRoot', 'agIORequest', 'frameHeader' and 'frameHandle' are accepted for
 * interface symmetry with other SMP response handlers but are not referenced
 * in this body.
 */
osGLOBAL void
dmHandleZoneViolation(
                      dmRoot_t            *dmRoot,
                      agsaRoot_t          *agRoot,
                      agsaIORequest_t     *agIORequest,
                      dmDeviceData_t      *oneDeviceData,
                      dmSMPFrameHeader_t  *frameHeader,
                      agsaFrameHandle_t   frameHandle
                     )
{
  dmIntPortContext_t  *onePortContext = agNULL;
  dmExpander_t        *oneExpander = agNULL;

  DM_DBG1(("dmHandleZoneViolation: start\n"));
  DM_DBG1(("dmHandleZoneViolation: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  DM_DBG1(("dmHandleZoneViolation: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
  onePortContext = oneDeviceData->dmPortContext;
  oneExpander = oneDeviceData->dmExpander;
  if (dmDiscoverCheck(dmRoot, onePortContext) == agTRUE)
  {
    DM_DBG1(("dmHandleZoneViolation: invalid port or aborted discovery!!!\n"));
    return;
  }
  /* for MCN */
  dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData);
  /* remove the expander from the discovering list */
  dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander);
  if ( onePortContext->discovery.status == DISCOVERY_UP_STREAM)
  {
    /* continue upstream discovering */
    dmUpStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
  }
  else /* DISCOVERY_DOWN_STREAM or DISCOVERY_CONFIG_ROUTING */
  {
    /* continue downstream discovering */
    dmDownStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
  }
  return;
}

/*
 * dmUpStreamDiscoverExpanderPhySkip (SAS 1.1 path)
 *
 * Skips the expander phy currently being discovered during UPSTREAM
 * discovery: advances discoveringPhyId and either issues the next DISCOVER
 * (more phys remain) or, on the last phy, updates MCN accounting, removes
 * the expander from the discovering list, and continues upstream discovery.
 * No-op (debug log only) if the port is no longer in DISCOVERY_UP_STREAM.
 */
osGLOBAL void
dmUpStreamDiscoverExpanderPhySkip(
                                  dmRoot_t            *dmRoot,
                                  dmIntPortContext_t  *onePortContext,
                                  dmExpander_t        *oneExpander
                                 )
{
  dmDeviceData_t  *oneDeviceData;

  DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: start\n"));
  oneDeviceData = oneExpander->dmDevice;
  DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
  oneExpander->discoveringPhyId++;
  if (onePortContext->discovery.status == DISCOVERY_UP_STREAM)
  {
    if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys )
    {
      DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: More Phys to discover\n"));
      /* continue discovery for the next phy */
      dmDiscoverSend(dmRoot, oneDeviceData);
    }
    else
    {
      DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: No More Phys\n"));
      /* for MCN */
      dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData);
      /* remove the expander from the discovering list */
      dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander);
      /* continue upstream discovering */
      dmUpStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
    }
  }
  else
  {
    DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: onePortContext->discovery.status not in DISCOVERY_UP_STREAM; status %d\n", onePortContext->discovery.status));
  }
  DM_DBG3(("dmUpStreamDiscoverExpanderPhySkip: end return phyID#%d\n", oneExpander->discoveringPhyId - 1));
  return;
}

/*
 * dmUpStreamDiscover2ExpanderPhySkip (SAS 2 path)
 *
 * SAS-2 variant of dmUpStreamDiscoverExpanderPhySkip; same control flow,
 * different debug level and messages.
 */
osGLOBAL void
dmUpStreamDiscover2ExpanderPhySkip(
                                   dmRoot_t            *dmRoot,
                                   dmIntPortContext_t  *onePortContext,
                                   dmExpander_t        *oneExpander
                                  )
{
  dmDeviceData_t  *oneDeviceData;

  DM_DBG2(("dmUpStreamDiscover2ExpanderPhySkip: start\n"));
  oneDeviceData = oneExpander->dmDevice;
  oneExpander->discoveringPhyId++;
  if (onePortContext->discovery.status == DISCOVERY_UP_STREAM)
  {
    if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys )
    {
      DM_DBG2(("dmUpStreamDiscover2ExpanderPhySkip: DISCOVERY_UP_STREAM find more ...\n"));
      /* continue discovery for the next phy */
      dmDiscoverSend(dmRoot, oneDeviceData);
    }
    else
    {
      DM_DBG2(("dmUpStreamDiscover2ExpanderPhySkip: DISCOVERY_UP_STREAM last phy continue upstream..\n"));
      /* for MCN */
      dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData);
      /* remove the expander from the discovering list */
      dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander);
      /* continue upstream discovering */
      dmUpStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
    }
  }
  else
  {
    DM_DBG2(("dmUpStreamDiscover2ExpanderPhySkip: onePortContext->discovery.status not in DISCOVERY_UP_STREAM; status %d\n", onePortContext->discovery.status));
  }
  DM_DBG2(("dmUpStreamDiscover2ExpanderPhySkip: end return phyID#%d\n", oneExpander->discoveringPhyId - 1));
  return;
}

/*
 * dmDownStreamDiscoverExpanderPhySkip (SAS 1.1 path)
 *
 * Skips the expander phy currently being discovered during DOWNSTREAM
 * discovery: advances discoveringPhyId and either issues the next DISCOVER
 * (more phys remain) or, on the last phy, updates MCN accounting, removes
 * the expander from the discovering list, and continues downstream
 * discovery. No-op (debug log only) if the port is no longer in
 * DISCOVERY_DOWN_STREAM.
 */
osGLOBAL void
dmDownStreamDiscoverExpanderPhySkip(
                                    dmRoot_t            *dmRoot,
                                    dmIntPortContext_t  *onePortContext,
                                    dmExpander_t        *oneExpander
                                   )
{
  dmDeviceData_t  *oneDeviceData;

  DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: start\n"));
  oneDeviceData = oneExpander->dmDevice;
  DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
  /* Increment the discovering phy id */
  oneExpander->discoveringPhyId ++;
  /* If the discovery status is DISCOVERY_DOWN_STREAM */
  if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM )
  {
    /* If not the last phy */
    if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys )
    {
      DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: More Phys to discover\n"));
      /* continue discovery for the next phy */
      dmDiscoverSend(dmRoot, oneDeviceData);
    }
    /* If the last phy */
    else
    {
      DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: No More Phys\n"));
      /* for MCN */
      dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData);
      /* remove the expander from the discovering list */
      dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander);
      /* continue downstream discovering */
      dmDownStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
    }
  }
  else
  {
    DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: onePortContext->discovery.status not in DISCOVERY_DOWN_STREAM; status %d\n", onePortContext->discovery.status));
  }
  DM_DBG3(("dmDownStreamDiscoverExpanderPhySkip: end return phyID#%d\n", oneExpander->discoveringPhyId - 1));
  return;
}

/*
 * dmDownStreamDiscover2ExpanderPhySkip (SAS 2 path)
 *
 * SAS-2 variant of dmDownStreamDiscoverExpanderPhySkip; same control flow,
 * different debug level, no SAS-address debug logging.
 */
osGLOBAL void
dmDownStreamDiscover2ExpanderPhySkip(
                                     dmRoot_t            *dmRoot,
                                     dmIntPortContext_t  *onePortContext,
                                     dmExpander_t        *oneExpander
                                    )
{
  dmDeviceData_t  *oneDeviceData;

  DM_DBG2(("dmDownStreamDiscover2ExpanderPhySkip: start\n"));
  oneDeviceData = oneExpander->dmDevice;
  /* Increment the discovering phy id */
  oneExpander->discoveringPhyId ++;
  /* If the discovery status is DISCOVERY_DOWN_STREAM */
  if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM )
  {
    /* If not the last phy */
    if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys )
    {
      DM_DBG2(("dmDownStreamDiscover2ExpanderPhySkip: More Phys to discover\n"));
      /* continue discovery for the next phy */
      dmDiscoverSend(dmRoot, oneDeviceData);
    }
    /* If the last phy */
    else
    {
      DM_DBG2(("dmDownStreamDiscover2ExpanderPhySkip: No More Phys\n"));
      /* for MCN */
      dmUpdateAllAdjacent(dmRoot, onePortContext, oneDeviceData);
      /* remove the expander from the discovering list */
      dmDiscoveringExpanderRemove(dmRoot, onePortContext, oneExpander);
      /* continue downstream discovering */
      dmDownStreamDiscovering(dmRoot, onePortContext, oneDeviceData);
    }
  }
  else
  {
    DM_DBG2(("dmDownStreamDiscover2ExpanderPhySkip: onePortContext->discovery.status not in DISCOVERY_DOWN_STREAM; status %d\n", onePortContext->discovery.status));
  }
  DM_DBG2(("dmDownStreamDiscover2ExpanderPhySkip: end return phyID#%d\n", oneExpander->discoveringPhyId - 1));
  return;
}
/*
 * dmExpanderUpStreamPhyAdd
 *
 * Records 'phyId' in oneExpander->upStreamPhys[] if it is not already
 * present, bumping numOfUpStreamPhys. Duplicates are silently ignored.
 * NOTE(review): no bounds check against the capacity of upStreamPhys[] is
 * visible here — presumably numOfPhys-limited by the callers; verify.
 */
osGLOBAL void
dmExpanderUpStreamPhyAdd(
                         dmRoot_t      *dmRoot,
                         dmExpander_t  *oneExpander,
                         bit8          phyId
                        )
{
  bit32  i;
  bit32  hasSet = agFALSE;

  DM_DBG3(("dmExpanderUpStreamPhyAdd: start, phyid %d\n", phyId));
  DM_DBG3(("dmExpanderUpStreamPhyAdd: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi));
  DM_DBG3(("dmExpanderUpStreamPhyAdd: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo));
  DM_DBG3(("dmExpanderUpStreamPhyAdd: phyid %d numOfUpStreamPhys %d\n", phyId, oneExpander->numOfUpStreamPhys));
  /* already recorded? */
  for ( i = 0; i < oneExpander->numOfUpStreamPhys; i ++ )
  {
    if ( oneExpander->upStreamPhys[i] == phyId )
    {
      hasSet = agTRUE;
      break;
    }
  }
  if ( hasSet == agFALSE )
  {
    oneExpander->upStreamPhys[oneExpander->numOfUpStreamPhys ++] = phyId;
  }
  DM_DBG3(("dmExpanderUpStreamPhyAdd: AFTER phyid %d numOfUpStreamPhys %d\n", phyId, oneExpander->numOfUpStreamPhys));
  /* for debugging */
  for ( i = 0; i < oneExpander->numOfUpStreamPhys; i ++ )
  {
    DM_DBG3(("dmExpanderUpStreamPhyAdd: index %d upstream[index] %d\n", i, oneExpander->upStreamPhys[i]));
  }
  return;
}

/*
 * dmExpanderDownStreamPhyAdd
 *
 * Mirror of dmExpanderUpStreamPhyAdd for the downstream direction: records
 * 'phyId' in oneExpander->downStreamPhys[] if not already present, bumping
 * numOfDownStreamPhys. Duplicates are silently ignored.
 */
osGLOBAL void
dmExpanderDownStreamPhyAdd(
                           dmRoot_t      *dmRoot,
                           dmExpander_t  *oneExpander,
                           bit8          phyId
                          )
{
  bit32  i;
  bit32  hasSet = agFALSE;

  DM_DBG3(("dmExpanderDownStreamPhyAdd: start, phyid %d\n", phyId));
  DM_DBG3(("dmExpanderDownStreamPhyAdd: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi));
  DM_DBG3(("dmExpanderDownStreamPhyAdd: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo));
  DM_DBG3(("dmExpanderDownStreamPhyAdd: phyid %d numOfDownStreamPhys %d\n", phyId, oneExpander->numOfDownStreamPhys));
  /* already recorded? */
  for ( i = 0; i < oneExpander->numOfDownStreamPhys; i ++ )
  {
    if ( oneExpander->downStreamPhys[i] == phyId )
    {
      hasSet = agTRUE;
      break;
    }
  }
  if ( hasSet == agFALSE )
  {
    oneExpander->downStreamPhys[oneExpander->numOfDownStreamPhys ++] = phyId;
  }
  DM_DBG3(("dmExpanderDownStreamPhyAdd: AFTER phyid %d numOfDownStreamPhys %d\n", phyId, oneExpander->numOfDownStreamPhys));
  /* for debugging */
  for ( i = 0; i < oneExpander->numOfDownStreamPhys; i ++ )
  {
    DM_DBG3(("dmExpanderDownStreamPhyAdd: index %d downstream[index] %d\n", i, oneExpander->downStreamPhys[i]));
  }
  return;
}

/*
 * dmDiscoveryReportMCN
 *
 * Walks the shared MainDeviceList and, for each device belonging to
 * 'onePortContext' whose MCN changed since the last report (and is nonzero;
 * MCN == 0 means the device was removed), packs the new MCN into the upper
 * bits of the device-info 'ext' field (low 11 bits preserved, MCN placed at
 * bit 11 and above) and reports the device to the upper layer via
 * tddmReportDevice(dmDeviceMCNChange), passing the attached expander's
 * device info when one exists. PrevMCN is then synchronized to MCN.
 */
osGLOBAL void
dmDiscoveryReportMCN(
                     dmRoot_t            *dmRoot,
                     dmIntPortContext_t  *onePortContext
                    )
{
  dmIntRoot_t     *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t  *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t  *oneDeviceData = agNULL;
  dmList_t        *DeviceListList;
  bit16           extension = 0;
  dmDeviceData_t  *oneAttachedExpDeviceData = agNULL;

  DM_DBG2(("dmDiscoveryReportMCN: start\n"));
  /*
     if full disocvery, report all devices using MCN
     if incremental discovery,
     1. compare MCN and PrevMCN
     2. report the changed ones; report MCN
     3. set PrevMCN to MCN
        PrevMCN = MCN
  */
  DeviceListList = dmAllShared->MainDeviceList.flink;
  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if ( oneDeviceData == agNULL)
    {
      DM_DBG1(("dmDiscoveryReportMCN: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDiscoveryReportMCN: loop did %d\n", oneDeviceData->id));
    if (oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG2(("dmDiscoveryReportMCN: oneDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo));
      DM_DBG2(("dmDiscoveryReportMCN: MCN 0x%08x PrevMCN 0x%08x\n", oneDeviceData->MCN, oneDeviceData->PrevMCN));
      if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START)
      {
        DM_DBG2(("dmDiscoveryReportMCN: FULL_START\n"));
      }
      else
      {
        DM_DBG2(("dmDiscoveryReportMCN: INCREMENTAL_START\n"));
      }
      /* if MCN is 0, the device is removed */
      if (oneDeviceData->MCN != oneDeviceData->PrevMCN && oneDeviceData->MCN != 0)
      {
        DM_DBG2(("dmDiscoveryReportMCN: reporting \n"));
        extension = oneDeviceData->dmDeviceInfo.ext;
        /* zero out MCN in extension */
        extension = extension & 0x7FF;
        /* sets MCN in extension */
        extension = extension | (oneDeviceData->MCN << 11);
        DEVINFO_PUT_EXT(&(oneDeviceData->dmDeviceInfo), extension);
        DM_DBG5(("dmDiscoveryReportMCN: MCN 0x%08x PrevMCN 0x%08x\n", DEVINFO_GET_EXT_MCN(&(oneDeviceData->dmDeviceInfo)), oneDeviceData->PrevMCN));
        if (oneDeviceData->ExpDevice != agNULL)
        {
          DM_DBG2(("dmDiscoveryReportMCN: attached expander case\n"));
          oneAttachedExpDeviceData = oneDeviceData->ExpDevice;
          tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, &oneAttachedExpDeviceData->dmDeviceInfo, dmDeviceMCNChange);
        }
        else
        {
          DM_DBG2(("dmDiscoveryReportMCN: No attached expander case\n"));
          tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, agNULL, dmDeviceMCNChange);
        }
        oneDeviceData->PrevMCN = oneDeviceData->MCN;
      }
      else
      {
        DM_DBG2(("dmDiscoveryReportMCN: No change; no reporting \n"));
        if (oneDeviceData->MCN == 0)
        {
          oneDeviceData->PrevMCN = oneDeviceData->MCN;
        }
      }
    }
    DeviceListList = DeviceListList->flink;
  }
  return;
}

/*
 * dmDiscoveryDumpMCN
 *
 * Debug-only helper: walks the shared MainDeviceList and logs SAS address,
 * MCN and PrevMCN for every device that belongs to 'onePortContext'.
 * No state is modified.
 */
osGLOBAL void
dmDiscoveryDumpMCN(
                   dmRoot_t            *dmRoot,
                   dmIntPortContext_t  *onePortContext
                  )
{
  dmIntRoot_t     *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t  *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t  *oneDeviceData = agNULL;
  dmList_t        *DeviceListList;

  DM_DBG3(("dmDiscoveryDumpMCN: start\n"));
  DeviceListList = dmAllShared->MainDeviceList.flink;
  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmDiscoveryDumpMCN: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDiscoveryDumpMCN: loop did %d\n", oneDeviceData->id));
    if (oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG3(("dmDiscoveryDumpMCN: oneDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo));
      DM_DBG3(("dmDiscoveryDumpMCN: MCN 0x%08x PrevMCN 0x%08x\n", oneDeviceData->MCN, oneDeviceData->PrevMCN));
    }
    DeviceListList = DeviceListList->flink;
  }
  return;
}

/*
 * dmDiscoveryResetMCN
 *
 * Reinitializes per-device MCN bookkeeping for every device belonging to
 * 'onePortContext' ahead of a new discovery pass: clears the cached
 * attached-expander pointer (ExpDevice), zeroes MCN, and resets MCNDone.
 * PrevMCN is deliberately left untouched so the next dmDiscoveryReportMCN
 * can detect changes.
 */
osGLOBAL void
dmDiscoveryResetMCN(
                    dmRoot_t            *dmRoot,
                    dmIntPortContext_t  *onePortContext
                   )
{
  dmIntRoot_t     *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t  *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t  *oneDeviceData = agNULL;
  dmList_t        *DeviceListList;

  DM_DBG2(("dmDiscoveryResetMCN: start\n"));
  /* reinitialize the device data belonging to this portcontext */
  DeviceListList = dmAllShared->MainDeviceList.flink;
  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmDiscoveryResetMCN: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDiscoveryResetMCN: loop did %d\n", oneDeviceData->id));
    if (oneDeviceData->dmPortContext == onePortContext)
    {
      if (oneDeviceData->ExpDevice != agNULL)
      {
        DM_DBG2(("dmDiscoveryResetMCN: resetting oneDeviceData->ExpDevice\n"));
        oneDeviceData->ExpDevice = agNULL;
      }
      DM_DBG3(("dmDiscoveryResetMCN: resetting MCN and MCNdone\n"));
      oneDeviceData->MCN = 0;
      oneDeviceData->MCNDone = agFALSE;
      DM_DBG2(("dmDiscoveryResetMCN: oneDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo));
    }
    DeviceListList = DeviceListList->flink;
  }
  return;
}

/*
 * dmUpdateAllAdjacent
 *
 * (Body continues past this chunk.) Takes the min of MCN between
 * 'oneDeviceData' and each adjacent (directly attached) device, upstream
 * and downstream, and marks them done; only adjacent targets are seen.
 */
osGLOBAL void
dmUpdateAllAdjacent(
                    dmRoot_t            *dmRoot,
                    dmIntPortContext_t  *onePortContext,
                    dmDeviceData_t      *oneDeviceData /* current one */
                   )
{
  dmIntRoot_t     *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t  *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t  *tmponeDeviceData = agNULL;
  dmList_t        *DeviceListList;

  DM_DBG2(("dmUpdateAllAdjacent: start\n"));
  if (oneDeviceData == agNULL)
  {
    DM_DBG1(("dmUpdateAllAdjacent: oneDeviceData is NULL!!!\n"));
    return;
  }
oneDeviceData->MCNDone = agTRUE; DM_DBG2(("dmUpdateAllAdjacent: oneDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DeviceListList = dmAllShared->MainDeviceList.flink; while (DeviceListList != &(dmAllShared->MainDeviceList)) { tmponeDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if ( tmponeDeviceData == agNULL) { DM_DBG1(("dmUpdateAllAdjacent: tmponeDeviceData is NULL!!!\n")); return; } DM_DBG3(("dmUpdateAllAdjacent: loop did %d\n", tmponeDeviceData->id)); if (tmponeDeviceData->dmPortContext == onePortContext && tmponeDeviceData->ExpDevice == oneDeviceData) { DM_DBG2(("dmUpdateAllAdjacent: setting MCN DONE\n")); DM_DBG2(("dmUpdateAllAdjacent: tmponeDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n", tmponeDeviceData->SASAddressID.sasAddressHi, tmponeDeviceData->SASAddressID.sasAddressLo)); tmponeDeviceData->MCNDone = agTRUE; if (oneDeviceData->directlyAttached == agFALSE) { DM_DBG2(("dmUpdateAllAdjacent: tmponeDeviceData MCN 0x%x\n", tmponeDeviceData->MCN)); DM_DBG2(("dmUpdateAllAdjacent: oneDeviceData MCN 0x%x\n", oneDeviceData->MCN)); tmponeDeviceData->MCN = MIN(oneDeviceData->MCN, tmponeDeviceData->MCN); } } DeviceListList = DeviceListList->flink; } return; } osGLOBAL void dmUpdateMCN( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *AdjacentDeviceData, /* adjacent expander */ dmDeviceData_t *oneDeviceData /* current one */ ) { DM_DBG2(("dmUpdateMCN: start\n")); if (AdjacentDeviceData == agNULL) { DM_DBG1(("dmUpdateMCN: AdjacentDeviceData is NULL!!!\n")); return; } if (oneDeviceData == agNULL) { DM_DBG1(("dmUpdateMCN: oneDeviceData is NULL!!!\n")); return; } DM_DBG2(("dmUpdateMCN: Current sasAddressHi 0x%08x sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG2(("dmUpdateMCN: AdjacentDeviceData one sasAddressHi 0x%08x sasAddressLo 0x%08x\n", 
AdjacentDeviceData->SASAddressID.sasAddressHi, AdjacentDeviceData->SASAddressID.sasAddressLo)); if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { DM_DBG2(("dmUpdateMCN: DISCOVERY_UP_STREAM\n")); } if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG2(("dmUpdateMCN: DISCOVERY_DOWN_STREAM\n")); } /* MCN */ /* directly attached one does not have MCN update only adjacent device data */ if (oneDeviceData->directlyAttached == agTRUE && AdjacentDeviceData->MCNDone == agFALSE) { AdjacentDeviceData->MCN++; DM_DBG2(("dmUpdateMCN: case 1 oneDeviceData MCN 0x%x\n", oneDeviceData->MCN)); DM_DBG2(("dmUpdateMCN: case 1 AdjacentDeviceData MCN 0x%x\n", AdjacentDeviceData->MCN)); } else if (AdjacentDeviceData->MCNDone == agFALSE) { AdjacentDeviceData->MCN++; AdjacentDeviceData->MCN = MIN(oneDeviceData->MCN, AdjacentDeviceData->MCN); DM_DBG2(("dmUpdateMCN: case 2 oneDeviceData MCN 0x%x\n", oneDeviceData->MCN)); DM_DBG2(("dmUpdateMCN: case 2 AdjacentDeviceData MCN 0x%x\n", AdjacentDeviceData->MCN)); } return; } /* go through expander list and device list array ??? 
*/ osGLOBAL dmDeviceData_t * dmPortSASDeviceFind( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, bit32 sasAddrLo, bit32 sasAddrHi, dmDeviceData_t *CurrentDeviceData /* current expander */ ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDeviceData_t *oneDeviceData, *RetDeviceData=agNULL; dmList_t *DeviceListList; DM_DBG3(("dmPortSASDeviceFind: start\n")); DM_DBG3(("dmPortSASDeviceFind: sasAddressHi 0x%08x sasAddressLo 0x%08x\n", sasAddrHi, sasAddrLo)); DM_ASSERT((agNULL != dmRoot), ""); DM_ASSERT((agNULL != onePortContext), ""); tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK); /* find a device's existence */ DeviceListList = dmAllShared->MainDeviceList.flink; if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { DM_DBG3(("dmPortSASDeviceFind: Full discovery\n")); while (DeviceListList != &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmPortSASDeviceFind: oneDeviceData is NULL!!!\n")); return agNULL; } if ((oneDeviceData->SASAddressID.sasAddressHi == sasAddrHi) && (oneDeviceData->SASAddressID.sasAddressLo == sasAddrLo) && (oneDeviceData->valid == agTRUE) && (oneDeviceData->dmPortContext == onePortContext) ) { DM_DBG3(("dmPortSASDeviceFind: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); DM_DBG3(("dmPortSASDeviceFind: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); DM_DBG3(("dmPortSASDeviceFind: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); RetDeviceData = oneDeviceData; dmUpdateMCN(dmRoot, onePortContext, RetDeviceData, CurrentDeviceData); break; } DeviceListList = DeviceListList->flink; } } else { /* incremental discovery */ DM_DBG3(("dmPortSASDeviceFind: Incremental discovery\n")); while (DeviceListList != &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, 
MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmPortSASDeviceFind: oneDeviceData is NULL!!!\n")); return agNULL; } if ((oneDeviceData->SASAddressID.sasAddressHi == sasAddrHi) && (oneDeviceData->SASAddressID.sasAddressLo == sasAddrLo) && (oneDeviceData->valid2 == agTRUE) && (oneDeviceData->dmPortContext == onePortContext) ) { DM_DBG3(("dmPortSASDeviceFind: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); DM_DBG3(("dmPortSASDeviceFind: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); DM_DBG3(("dmPortSASDeviceFind: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); RetDeviceData = oneDeviceData; dmUpdateMCN(dmRoot, onePortContext, RetDeviceData, CurrentDeviceData); break; } DeviceListList = DeviceListList->flink; } } tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK); return RetDeviceData; } bit32 dmNewEXPorNot( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmSASSubID_t *dmSASSubID ) { // dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; // dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmExpander_t *oneExpander = agNULL; dmList_t *ExpanderList; bit32 ret = agTRUE; dmDeviceData_t *oneDeviceData = agNULL; DM_DBG3(("dmNewEXPorNot: start\n")); /* find a device's existence */ ExpanderList = onePortContext->discovery.discoveringExpanderList.flink; while (ExpanderList != &(onePortContext->discovery.discoveringExpanderList)) { oneExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if ( oneExpander == agNULL) { DM_DBG1(("dmNewEXPorNot: oneExpander is NULL!!!\n")); return agFALSE; } oneDeviceData = oneExpander->dmDevice; if ((oneDeviceData->SASAddressID.sasAddressHi == dmSASSubID->sasAddressHi) && (oneDeviceData->SASAddressID.sasAddressLo == dmSASSubID->sasAddressLo) && (oneDeviceData->dmPortContext == onePortContext) ) { DM_DBG3(("dmNewEXPorNot: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); ret = agFALSE; break; } ExpanderList = 
ExpanderList->flink; } return ret; } bit32 dmNewSASorNot( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmSASSubID_t *dmSASSubID ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDeviceData_t *oneDeviceData = agNULL; dmList_t *DeviceListList; bit32 ret = agTRUE; DM_DBG3(("dmNewSASorNot: start\n")); /* find a device's existence */ DeviceListList = dmAllShared->MainDeviceList.flink; while (DeviceListList != &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmNewSASorNot: oneDeviceData is NULL!!!\n")); return agFALSE; } if ((oneDeviceData->SASAddressID.sasAddressHi == dmSASSubID->sasAddressHi) && (oneDeviceData->SASAddressID.sasAddressLo == dmSASSubID->sasAddressLo) && (oneDeviceData->dmPortContext == onePortContext) && (oneDeviceData->registered == agTRUE) ) { DM_DBG3(("dmNewSASorNot: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); ret = agFALSE; break; } DeviceListList = DeviceListList->flink; } return ret; } /* call osGLOBAL bit32 tddmReportDevice( dmRoot_t *dmRoot, dmPortContext_t *dmPortContext, dmDeviceInfo_t *dmDeviceInfo ) if not reported, report Device to TDM */ osGLOBAL dmDeviceData_t * dmPortSASDeviceAdd( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, agsaSASIdentify_t sasIdentify, bit32 sasInitiator, bit8 connectionRate, bit32 itNexusTimeout, bit32 firstBurstSize, bit32 deviceType, dmDeviceData_t *oneExpDeviceData, dmExpander_t *dmExpander, bit8 phyID ) { dmDeviceData_t *oneDeviceData = agNULL; bit8 dev_s_rate = 0; bit8 sasorsata = 1; dmSASSubID_t dmSASSubID; bit8 ExpanderConnectionRate = connectionRate; dmDeviceData_t *oneAttachedExpDeviceData = agNULL; bit16 extension = 0; bit32 current_link_rate = 0; DM_DBG3(("dmPortSASDeviceAdd: start\n")); DM_DBG3(("dmPortSASDeviceAdd: connectionRate %d\n", connectionRate)); dmSASSubID.sasAddressHi = 
SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); dmSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); dmSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; if (oneExpDeviceData != agNULL) { ExpanderConnectionRate = DEVINFO_GET_LINKRATE(&oneExpDeviceData->agDeviceInfo); DM_DBG3(("dmPortSASDeviceAdd: ExpanderConnectionRate 0x%x\n", ExpanderConnectionRate)); } if (oneExpDeviceData != agNULL) { if (oneExpDeviceData->SASAddressID.sasAddressHi == 0x0 && oneExpDeviceData->SASAddressID.sasAddressLo == 0x0) { DM_DBG1(("dmPortSASDeviceAdd: 1st Wrong expander!!!\n")); } } /* old device and already reported to TDM */ if ( agFALSE == dmNewSASorNot( dmRoot, onePortContext, &dmSASSubID ) ) /* old device */ { DM_DBG3(("dmPortSASDeviceAdd: OLD qqqq initiator_ssp_stp_smp %d target_ssp_stp_smp %d\n", dmSASSubID.initiator_ssp_stp_smp, dmSASSubID.target_ssp_stp_smp)); /* allocate a new device and set the valid bit */ oneDeviceData = dmAddSASToSharedcontext( dmRoot, onePortContext, &dmSASSubID, oneExpDeviceData, phyID ); if (oneDeviceData == agNULL) { DM_DBG1(("dmPortSASDeviceAdd: no more device, oneDeviceData is null!!!\n")); } /* If a device is allocated */ if ( oneDeviceData != agNULL ) { if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { DM_DBG3(("dmPortSASDeviceAdd: OLD, UP_STREAM\n")); } if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG3(("dmPortSASDeviceAdd: OLD, DOWN_STREAM\n")); } if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { DM_DBG3(("dmPortSASDeviceAdd: FULL_START\n")); oneDeviceData->MCN++; } else { /* incremental */ DM_DBG3(("dmPortSASDeviceAdd: INCREMENTAL_START\n")); if (oneDeviceData->MCN == 0 && oneDeviceData->directlyAttached == agFALSE) { oneDeviceData->MCN++; } } DM_DBG3(("dmPortSASDeviceAdd: oneDeviceData MCN 0x%08x\n", oneDeviceData->MCN)); DM_DBG3(("dmPortSASDeviceAdd: oneDeviceData sasAddressHi 0x%08x sasAddressLo 
0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG3(("dmPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify))); DM_DBG3(("dmPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify))); // oneDeviceData->sasIdentify = sasIdentify; dm_memcpy(&(oneDeviceData->sasIdentify), &sasIdentify, sizeof(agsaSASIdentify_t)); DM_DBG3(("dmPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify))); DM_DBG3(("dmPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify))); /* parse sasIDframe to fill in agDeviceInfo */ DEVINFO_PUT_SMPTO(&oneDeviceData->agDeviceInfo, DEFAULT_SMP_TIMEOUT); DEVINFO_PUT_ITNEXUSTO(&oneDeviceData->agDeviceInfo, (bit16)itNexusTimeout); DEVINFO_PUT_FBS(&oneDeviceData->agDeviceInfo, (bit16)firstBurstSize); DEVINFO_PUT_FLAG(&oneDeviceData->agDeviceInfo, 1); oneDeviceData->SASSpecDeviceType = SA_IDFRM_GET_DEVICETTYPE(&sasIdentify); /* adjusting connectionRate */ oneAttachedExpDeviceData = oneDeviceData->ExpDevice; if (oneAttachedExpDeviceData != agNULL) { connectionRate = MIN(connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo)); DM_DBG3(("dmPortSASDeviceAdd: 1st connectionRate 0x%x DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo) 0x%x\n", connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); } else { DM_DBG3(("dmPortSASDeviceAdd: 1st oneAttachedExpDeviceData is NULL\n")); } /* Device Type, SAS or SATA, connection rate; bit7 --- bit0 */ sasorsata = (bit8)deviceType; /* sTSDK spec device typ */ dev_s_rate = dev_s_rate | (sasorsata << 4); dev_s_rate = dev_s_rate | MIN(connectionRate, ExpanderConnectionRate); /* detect link rate change */ current_link_rate = DEVINFO_GET_LINKRATE(&oneDeviceData->agDeviceInfo); if (current_link_rate != (bit32)MIN(connectionRate, ExpanderConnectionRate)) { DM_DBG1(("dmPortSASDeviceAdd: 
link rate changed current 0x%x new 0x%x\n", current_link_rate, MIN(connectionRate, ExpanderConnectionRate))); DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->dmDeviceInfo, dev_s_rate); if (oneDeviceData->ExpDevice != agNULL) { oneAttachedExpDeviceData = oneDeviceData->ExpDevice; tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, &oneAttachedExpDeviceData->dmDeviceInfo, dmDeviceRateChange); } else { tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, agNULL, dmDeviceArrival); } } DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->agDeviceInfo, dev_s_rate); DEVINFO_PUT_SAS_ADDRESSLO( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify) ); DEVINFO_PUT_SAS_ADDRESSHI( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify) ); oneDeviceData->agContext.osData = oneDeviceData; oneDeviceData->agContext.sdkData = agNULL; } return oneDeviceData; } /* old device */ /* new device */ DM_DBG3(("dmPortSASDeviceAdd: NEW qqqq initiator_ssp_stp_smp %d target_ssp_stp_smp %d\n", dmSASSubID.initiator_ssp_stp_smp, dmSASSubID.target_ssp_stp_smp)); /* allocate a new device and set the valid bit */ oneDeviceData = dmAddSASToSharedcontext( dmRoot, onePortContext, &dmSASSubID, oneExpDeviceData, phyID ); if (oneDeviceData == agNULL) { DM_DBG1(("dmPortSASDeviceAdd: no more device, oneDeviceData is null !!!\n")); } /* If a device is allocated */ if ( oneDeviceData != agNULL ) { // DM_DBG3(("dmPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify))); // DM_DBG3(("dmPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify))); // oneDeviceData->sasIdentify = sasIdentify; dm_memcpy(&(oneDeviceData->sasIdentify), &sasIdentify, sizeof(agsaSASIdentify_t)); if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { DM_DBG3(("dmPortSASDeviceAdd: NEW, UP_STREAM\n")); } if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { 
DM_DBG3(("dmPortSASDeviceAdd: NEW, DOWN_STREAM\n")); } if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { DM_DBG3(("dmPortSASDeviceAdd: FULL_START\n")); oneDeviceData->MCN++; } else { /* incremental */ DM_DBG3(("dmPortSASDeviceAdd: INCREMENTAL_START\n")); if (oneDeviceData->MCN == 0 && oneDeviceData->directlyAttached == agFALSE) { oneDeviceData->MCN++; } } DM_DBG3(("dmPortSASDeviceAdd: oneDeviceData MCN 0x%08x\n", oneDeviceData->MCN)); DM_DBG3(("dmPortSASDeviceAdd: oneDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG3(("dmPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify))); DM_DBG3(("dmPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify))); /* parse sasIDframe to fill in agDeviceInfo */ DEVINFO_PUT_SMPTO(&oneDeviceData->agDeviceInfo, DEFAULT_SMP_TIMEOUT); DEVINFO_PUT_ITNEXUSTO(&oneDeviceData->agDeviceInfo, (bit16)itNexusTimeout); DEVINFO_PUT_FBS(&oneDeviceData->agDeviceInfo, (bit16)firstBurstSize); DEVINFO_PUT_FLAG(&oneDeviceData->agDeviceInfo, 1); oneDeviceData->SASSpecDeviceType = SA_IDFRM_GET_DEVICETTYPE(&sasIdentify); /* adjusting connectionRate */ oneAttachedExpDeviceData = oneDeviceData->ExpDevice; if (oneAttachedExpDeviceData != agNULL) { connectionRate = MIN(connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo)); DM_DBG3(("dmPortSASDeviceAdd: 2nd connectionRate 0x%x DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo) 0x%x\n", connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); } else { DM_DBG3(("dmPortSASDeviceAdd: 2nd oneAttachedExpDeviceData is NULL\n")); } /* Device Type, SAS or SATA, connection rate; bit7 --- bit0 */ sasorsata = (bit8)deviceType; dev_s_rate = dev_s_rate | (sasorsata << 4); dev_s_rate = dev_s_rate | MIN(connectionRate, ExpanderConnectionRate); 
DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->agDeviceInfo, dev_s_rate); DEVINFO_PUT_SAS_ADDRESSLO( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify) ); DEVINFO_PUT_SAS_ADDRESSHI( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify) ); oneDeviceData->agContext.osData = oneDeviceData; oneDeviceData->agContext.sdkData = agNULL; DM_DBG3(("dmPortSASDeviceAdd: did %d\n", oneDeviceData->id)); /* reporting to TDM; setting dmDeviceInfo */ DEVINFO_PUT_SMPTO(&oneDeviceData->dmDeviceInfo, DEFAULT_SMP_TIMEOUT); DEVINFO_PUT_ITNEXUSTO(&oneDeviceData->dmDeviceInfo, (bit16)itNexusTimeout); DEVINFO_PUT_FBS(&oneDeviceData->dmDeviceInfo, (bit16)firstBurstSize); DEVINFO_PUT_FLAG(&oneDeviceData->dmDeviceInfo, 1); DEVINFO_PUT_INITIATOR_SSP_STP_SMP(&oneDeviceData->dmDeviceInfo, dmSASSubID.initiator_ssp_stp_smp); DEVINFO_PUT_TARGET_SSP_STP_SMP(&oneDeviceData->dmDeviceInfo, dmSASSubID.target_ssp_stp_smp); extension = phyID; /* setting 6th bit of dev_s_rate */ if (oneDeviceData->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE || oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE ) { extension = (bit16)(extension | (1 << 8)); } DEVINFO_PUT_EXT(&oneDeviceData->dmDeviceInfo, extension); DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->dmDeviceInfo, dev_s_rate); DEVINFO_PUT_SAS_ADDRESSLO( &oneDeviceData->dmDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify) ); DEVINFO_PUT_SAS_ADDRESSHI( &oneDeviceData->dmDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify) ); if (oneDeviceData->ExpDevice != agNULL) { DM_DBG3(("dmPortSASDeviceAdd: attached expander case\n")); oneAttachedExpDeviceData = oneDeviceData->ExpDevice; /* Puts attached expander's SAS address into dmDeviceInfo */ DEVINFO_PUT_SAS_ADDRESSLO( &oneAttachedExpDeviceData->dmDeviceInfo, oneAttachedExpDeviceData->SASAddressID.sasAddressLo ); DEVINFO_PUT_SAS_ADDRESSHI( &oneAttachedExpDeviceData->dmDeviceInfo, 
oneAttachedExpDeviceData->SASAddressID.sasAddressHi ); DM_DBG3(("dmPortSASDeviceAdd: oneAttachedExpDeviceData addrHi 0x%08x addrLo 0x%08x PhyID 0x%x ext 0x%x\n", DM_GET_SAS_ADDRESSHI(oneAttachedExpDeviceData->dmDeviceInfo.sasAddressHi), DM_GET_SAS_ADDRESSLO(oneAttachedExpDeviceData->dmDeviceInfo.sasAddressLo), phyID, extension)); if (oneAttachedExpDeviceData->SASAddressID.sasAddressHi == 0x0 && oneAttachedExpDeviceData->SASAddressID.sasAddressLo == 0x0) { DM_DBG1(("dmPortSASDeviceAdd: 2nd Wrong expander!!!\n")); } if (oneDeviceData->reported == agFALSE) { oneDeviceData->registered = agTRUE; oneDeviceData->reported = agTRUE; if (deviceType == STP_DEVICE_TYPE) { /*STP device, DM need send SMP Report Phy SATA to get the SATA device type */ oneAttachedExpDeviceData->dmExpander->dmDeviceToProcess = oneDeviceData; dmReportPhySataSend(dmRoot, oneAttachedExpDeviceData, phyID); } else { /* SAS or SMP device */ tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, &oneAttachedExpDeviceData->dmDeviceInfo, dmDeviceArrival); } } } else { DM_DBG3(("dmPortSASDeviceAdd: NO attached expander case\n")); if (oneDeviceData->reported == agFALSE) { oneDeviceData->registered = agTRUE; oneDeviceData->reported = agTRUE; tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, agNULL, dmDeviceArrival); } } } return oneDeviceData; } osGLOBAL dmDeviceData_t * dmFindRegNValid( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmSASSubID_t *dmSASSubID ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDeviceData_t *oneDeviceData = agNULL; dmList_t *DeviceListList; bit32 found = agFALSE; DM_DBG3(("dmFindRegNValid: start\n")); /* find a device's existence */ DeviceListList = dmAllShared->MainDeviceList.flink; if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START) { DM_DBG3(("dmFindRegNValid: Full discovery\n")); while (DeviceListList 
!= &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmFindRegNValid: oneDeviceData is NULL!!!\n")); return agFALSE; } if ((oneDeviceData->SASAddressID.sasAddressHi == dmSASSubID->sasAddressHi) && (oneDeviceData->SASAddressID.sasAddressLo == dmSASSubID->sasAddressLo) && (oneDeviceData->valid == agTRUE) && (oneDeviceData->dmPortContext == onePortContext) ) { DM_DBG3(("dmFindRegNValid: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); DM_DBG3(("dmFindRegNValid: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindRegNValid: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); found = agTRUE; break; } DeviceListList = DeviceListList->flink; } } else { /* incremental discovery */ DM_DBG3(("dmFindRegNValid: Incremental discovery\n")); while (DeviceListList != &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmFindRegNValid: oneDeviceData is NULL!!!\n")); return agFALSE; } if ((oneDeviceData->SASAddressID.sasAddressHi == dmSASSubID->sasAddressHi) && (oneDeviceData->SASAddressID.sasAddressLo == dmSASSubID->sasAddressLo) && (oneDeviceData->valid2 == agTRUE) && (oneDeviceData->dmPortContext == onePortContext) ) { DM_DBG3(("dmFindRegNValid: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); DM_DBG3(("dmFindRegNValid: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindRegNValid: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); found = agTRUE; break; } DeviceListList = DeviceListList->flink; } } if (found == agFALSE) { DM_DBG3(("dmFindRegNValid: end returning NULL\n")); return agNULL; } else { DM_DBG3(("dmFindRegNValid: end returning NOT NULL\n")); return oneDeviceData; } } osGLOBAL void dmNotifyBC( dmRoot_t *dmRoot, dmPortContext_t *dmPortContext, 
bit32 type) { dmIntPortContext_t *onePortContext = agNULL; onePortContext = (dmIntPortContext_t *)dmPortContext->dmData; DM_DBG3(("dmNotifyBC: start\n")); if (onePortContext == agNULL) { DM_DBG1(("dmNotifyBC: onePortContext is NULL, wrong!!!\n")); return; } if (type == OSSA_HW_EVENT_BROADCAST_CHANGE) { if (onePortContext->DiscoveryAbortInProgress == agFALSE) { if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED) { DM_DBG3(("dmNotifyBC: BROADCAST_CHANGE\n")); onePortContext->DiscoveryState = DM_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = DM_DISCOVERY_OPTION_INCREMENTAL_START; /* processed broadcast change */ onePortContext->discovery.SeenBC = agFALSE; } else { DM_DBG3(("dmNotifyBC: pid %d BROADCAST_CHANGE; updating SeenBC. Do nothing.\n", onePortContext->id)); onePortContext->discovery.SeenBC = agTRUE; } } } else if (type == OSSA_HW_EVENT_BROADCAST_SES) { DM_DBG3(("dmNotifyBC: OSSA_HW_EVENT_BROADCAST_SES\n")); } else if (type == OSSA_HW_EVENT_BROADCAST_EXP) { DM_DBG3(("dmNotifyBC: OSSA_HW_EVENT_BROADCAST_EXP\n")); } else { DM_DBG3(("dmNotifyBC: unspecified broadcast type 0x%x\n", type)); } return; } #ifdef WORKED /* triggers incremental discovery */ osGLOBAL void dmNotifyBC( dmRoot_t *dmRoot, dmPortContext_t *dmPortContext, bit32 type) { dmIntPortContext_t *onePortContext = agNULL; onePortContext = (dmIntPortContext_t *)dmPortContext->dmData; DM_DBG3(("dmNotifyBC: start\n")); if (type == OSSA_HW_EVENT_BROADCAST_CHANGE) { if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED) { DM_DBG3(("dmNotifyBC: BROADCAST_CHANGE; does incremental discovery\n")); onePortContext->DiscoveryState = DM_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = DM_DISCOVERY_OPTION_INCREMENTAL_START; /* processed broadcast change */ onePortContext->discovery.SeenBC = agFALSE; if (onePortContext->discovery.ResetTriggerred == agTRUE) { DM_DBG3(("dmNotifyBC: tdsaBCTimer\n")); dmBCTimer(dmRoot, onePortContext); } else { dmDiscover( dmRoot, dmPortContext, 
DM_DISCOVERY_OPTION_INCREMENTAL_START
                  );
      }
    }
    else
    {
      DM_DBG3(("dmNotifyBC: pid %d BROADCAST_CHANGE; updating SeenBC. Do nothing.\n", onePortContext->id));
      onePortContext->discovery.SeenBC = agTRUE;
    }
  }
  else if (type == OSSA_HW_EVENT_BROADCAST_SES)
  {
    DM_DBG3(("dmNotifyBC: OSSA_HW_EVENT_BROADCAST_SES\n"));
  }
  else if (type == OSSA_HW_EVENT_BROADCAST_EXP)
  {
    DM_DBG3(("dmNotifyBC: OSSA_HW_EVENT_BROADCAST_EXP\n"));
  }
  else
  {
    DM_DBG3(("dmNotifyBC: unspecified broadcast type 0x%x\n", type));
  }
  return;
}
#endif

/*
 * dmResetFailedDiscovery
 *
 * Clears a failed-discovery condition: moves the port's DiscoveryState
 * from DM_DSTATE_COMPLETED_WITH_FAILURE back to DM_DSTATE_COMPLETED so a
 * new discovery may be started.  Fails when the port context is missing
 * or the state is anything else.
 */
osGLOBAL bit32
dmResetFailedDiscovery(
                       dmRoot_t        *dmRoot,
                       dmPortContext_t *dmPortContext)
{
  dmIntPortContext_t *onePortContext = agNULL;

  DM_DBG1(("dmResetFailedDiscovery: start\n"));

  onePortContext = (dmIntPortContext_t *)dmPortContext->dmData;
  if (onePortContext == agNULL)
  {
    DM_DBG1(("dmResetFailedDiscovery: onePortContext is NULL, wrong!!!\n"));
    return DM_RC_FAILURE;
  }
  if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED_WITH_FAILURE)
  {
    onePortContext->DiscoveryState = DM_DSTATE_COMPLETED;
  }
  else
  {
    DM_DBG1(("dmResetFailedDiscovery: discovery is NOT DM_DSTATE_COMPLETED_WITH_FAILURE. It is 0x%x\n", onePortContext->DiscoveryState));
    return DM_RC_FAILURE;
  }
  return DM_RC_SUCCESS;
}

/*
 * dmQueryDiscovery
 *
 * Reports the port's current discovery status to TDM via
 * tddmQueryDiscoveryCB (completed / failed / in progress).  Always
 * returns DM_RC_SUCCESS once the callback has been issued; fails only
 * when the port context is missing.
 */
osGLOBAL bit32
dmQueryDiscovery(
                 dmRoot_t        *dmRoot,
                 dmPortContext_t *dmPortContext)
{
  dmIntPortContext_t *onePortContext = agNULL;

  DM_DBG3(("dmQueryDiscovery: start\n"));

  onePortContext = (dmIntPortContext_t *)dmPortContext->dmData;
  if (onePortContext == agNULL)
  {
    DM_DBG1(("dmQueryDiscovery: onePortContext is NULL, wrong!!!\n"));
    return DM_RC_FAILURE;
  }
  /* call tddmQueryDiscoveryCB() */
  if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED)
  {
    tddmQueryDiscoveryCB(dmRoot, dmPortContext, onePortContext->discoveryOptions, dmDiscCompleted);
  }
  else if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED_WITH_FAILURE)
  {
    tddmQueryDiscoveryCB(dmRoot, dmPortContext, onePortContext->discoveryOptions, dmDiscFailed);
  }
  else
  {
    tddmQueryDiscoveryCB(dmRoot, dmPortContext, onePortContext->discoveryOptions, dmDiscInProgress);
  }
  return DM_RC_SUCCESS;
}

/* should only for an expander */
/*
 * dmRegisterDevice
 *
 * Registers an (expander) device handed in by TDM before/while discovery
 * runs on this port.  Validates the port, adds the device to the shared
 * context, decodes expander type and direct-attachment from dmDeviceInfo,
 * and queues the expander for discovery when discovery has not started.
 * (Definition continues beyond this view.)
 */
osGLOBAL bit32
dmRegisterDevice(
                 dmRoot_t        *dmRoot,
                 dmPortContext_t *dmPortContext,
                 dmDeviceInfo_t  *dmDeviceInfo,
                 agsaDevHandle_t *agDevHandle
                )
{
  dmIntPortContext_t *onePortContext = agNULL;
  dmExpander_t       *oneExpander = agNULL;
  bit32              sasAddressHi, sasAddressLo;
  dmDeviceData_t     *oneDeviceData = agNULL;
  dmSASSubID_t       dmSASSubID;

  DM_DBG3(("dmRegisterDevice: start\n"));

  onePortContext = (dmIntPortContext_t *)dmPortContext->dmData;
  if (onePortContext == agNULL)
  {
    DM_DBG1(("dmRegisterDevice: onePortContext is NULL!!!\n"));
    return DM_RC_FAILURE;
  }
  if (onePortContext->valid == agFALSE)
  {
    DM_DBG1(("dmRegisterDevice: invalid port!!!\n"));
    return DM_RC_FAILURE;
  }
  onePortContext->RegFailed = agFALSE;

  /* tdssAddSASToSharedcontext() from ossaHwCB()
     osGLOBAL void
     tdssAddSASToSharedcontext(
                               tdsaPortContext_t *tdsaPortContext_Instance,
                               agsaRoot_t        *agRoot,
                               agsaDevHandle_t   *agDevHandle,
                               tdsaSASSubID_t    *agSASSubID,
                               bit32             registered,
                               bit8              phyID,
                               bit32             flag
                               );
     from discovery
     osGLOBAL tdsaDeviceData_t *
tdssNewAddSASToSharedcontext( agsaRoot_t *agRoot, tdsaPortContext_t *onePortContext, tdsaSASSubID_t *agSASSubID, tdsaDeviceData_t *oneExpDeviceData, bit8 phyID ); */ /* start here */ dmSASSubID.sasAddressHi = DM_GET_SAS_ADDRESSHI(dmDeviceInfo->sasAddressHi); dmSASSubID.sasAddressLo = DM_GET_SAS_ADDRESSHI(dmDeviceInfo->sasAddressLo); dmSASSubID.initiator_ssp_stp_smp = dmDeviceInfo->initiator_ssp_stp_smp; dmSASSubID.target_ssp_stp_smp = dmDeviceInfo->target_ssp_stp_smp; oneDeviceData = dmAddSASToSharedcontext(dmRoot, onePortContext, &dmSASSubID, agNULL, 0xFF); if (oneDeviceData == agNULL) { DM_DBG1(("dmRegisterDevice: oneDeviceData is NULL!!!\n")); return DM_RC_FAILURE; } oneDeviceData->agDeviceInfo.devType_S_Rate = dmDeviceInfo->devType_S_Rate; dm_memcpy(oneDeviceData->agDeviceInfo.sasAddressHi, dmDeviceInfo->sasAddressHi, 4); dm_memcpy(oneDeviceData->agDeviceInfo.sasAddressLo, dmDeviceInfo->sasAddressLo, 4); /* finds the type of expanders */ if (DEVINFO_GET_EXT_SMP(dmDeviceInfo)) { if (DEVINFO_GET_EXT_EXPANDER_TYPE(dmDeviceInfo) == SAS_EDGE_EXPANDER_DEVICE) { oneDeviceData->SASSpecDeviceType = SAS_EDGE_EXPANDER_DEVICE; } else if (DEVINFO_GET_EXT_EXPANDER_TYPE(dmDeviceInfo) == SAS_FANOUT_EXPANDER_DEVICE) { oneDeviceData->SASSpecDeviceType = SAS_FANOUT_EXPANDER_DEVICE; } else { /* default */ DM_DBG4(("dmRegisterDevice: no expander type. 
default to edge expander\n")); oneDeviceData->SASSpecDeviceType = SAS_EDGE_EXPANDER_DEVICE; } } if (DEVINFO_GET_EXT_MCN(dmDeviceInfo) == 0xF) { DM_DBG1(("dmRegisterDevice: directly attached expander\n")); oneDeviceData->directlyAttached = agTRUE; oneDeviceData->dmDeviceInfo.ext = (bit16)(oneDeviceData->dmDeviceInfo.ext | (0xF << 11)); } else { DM_DBG1(("dmRegisterDevice: NOT directly attached expander\n")); oneDeviceData->directlyAttached = agFALSE; } if (onePortContext->DiscoveryState == DM_DSTATE_NOT_STARTED) { DM_DBG3(("dmRegisterDevice: DM_DSTATE_NOT_STARTED\n")); /* before the discovery is started */ oneExpander = dmDiscoveringExpanderAlloc(dmRoot, onePortContext, oneDeviceData); if ( oneExpander != agNULL) { oneExpander->agDevHandle = agDevHandle; /* update SAS address field */ oneExpander->dmDevice->SASAddressID.sasAddressHi = DM_GET_SAS_ADDRESSHI(dmDeviceInfo->sasAddressHi); oneExpander->dmDevice->SASAddressID.sasAddressLo = DM_GET_SAS_ADDRESSLO(dmDeviceInfo->sasAddressLo); DM_DBG3(("dmRegisterDevice: AddrHi 0x%08x AddrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi, oneExpander->dmDevice->SASAddressID.sasAddressLo)); dmDiscoveringExpanderAdd(dmRoot, onePortContext, oneExpander); } else { DM_DBG1(("dmRegisterDevice: failed to allocate expander !!!\n")); /* remember that the registration failed so that a discovery can't be started */ onePortContext->RegFailed = agTRUE; return DM_RC_FAILURE; } } else { /* the discovery has started. Alloc and add have been done. 
find an expander using dmDeviceInfo, and update the expander's agDevHandle call dmExpFind() */ DM_DBG3(("dmRegisterDevice: NOT DM_DSTATE_NOT_STARTED\n")); sasAddressHi = DM_GET_SAS_ADDRESSHI(dmDeviceInfo->sasAddressHi); sasAddressLo = DM_GET_SAS_ADDRESSLO(dmDeviceInfo->sasAddressLo); DM_DBG3(("dmRegisterDevice: AddrHi 0x%08x AddrLo 0x%08x\n", sasAddressHi, sasAddressLo)); oneExpander = dmExpFind(dmRoot, onePortContext, sasAddressHi, sasAddressLo); if ( oneExpander != agNULL) { oneExpander->agDevHandle = agDevHandle; } else { DM_DBG1(("dmRegisterDevice: not allowed case, wrong !!!\n")); return DM_RC_FAILURE; } } return DM_RC_SUCCESS; } osGLOBAL dmExpander_t * dmDiscoveringExpanderAlloc( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *oneDeviceData ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmExpander_t *oneExpander = agNULL; dmList_t *ExpanderList; DM_DBG3(("dmDiscoveringExpanderAlloc: start\n")); DM_DBG3(("dmDiscoveringExpanderAlloc: did %d\n", oneDeviceData->id)); DM_DBG3(("dmDiscoveringExpanderAlloc: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); DM_DBG3(("dmDiscoveringExpanderAlloc: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); if (onePortContext->valid == agFALSE) { DM_DBG1(("dmDiscoveringExpanderAlloc: invalid port!!!\n")); return agNULL; } /* check exitence in dmAllShared->mainExpanderList */ oneExpander = dmExpMainListFind(dmRoot, onePortContext, oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo); if (oneExpander == agNULL) { tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); if (DMLIST_EMPTY(&(dmAllShared->freeExpanderList))) { DM_DBG1(("dmDiscoveringExpanderAlloc: no free expanders pid %d!!!\n", onePortContext->id)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); return agNULL; } else { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } 
tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_DEQUEUE_FROM_HEAD(&ExpanderList, &(dmAllShared->freeExpanderList)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); oneExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); } if (oneExpander != agNULL) { DM_DBG1(("dmDiscoveringExpanderAlloc: pid %d exp id %d \n", onePortContext->id, oneExpander->id)); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_DEQUEUE_THIS(&(oneExpander->linkNode)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); oneExpander->dmDevice = oneDeviceData; oneExpander->dmUpStreamExpander = agNULL; oneExpander->dmCurrentDownStreamExpander = agNULL; oneExpander->dmReturnginExpander = agNULL; oneExpander->hasUpStreamDevice = agFALSE; oneExpander->numOfUpStreamPhys = 0; oneExpander->currentUpStreamPhyIndex = 0; oneExpander->discoveringPhyId = 0; oneExpander->underDiscovering = agFALSE; dm_memset( &(oneExpander->currentIndex), 0, sizeof(oneExpander->currentIndex)); oneDeviceData->dmExpander = oneExpander; DM_DBG3(("dmDiscoveringExpanderAlloc: oneDeviceData %p did %d\n", oneDeviceData, oneDeviceData->id)); DM_DBG3(("dmDiscoveringExpanderAlloc: oneExpander %p did %d\n", oneDeviceData->dmExpander, oneDeviceData->dmExpander->id)); } return oneExpander; } osGLOBAL void dmDiscoveringExpanderAdd( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander ) { DM_DBG3(("dmDiscoveringExpanderAdd: start\n")); DM_DBG3(("dmDiscoveringExpanderAdd: expander id %d\n", oneExpander->id)); DM_DBG3(("dmDiscoveringExpanderAdd: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmDiscoveringExpanderAdd: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); if (onePortContext->valid == agFALSE) { DM_DBG1(("dmDiscoveringExpanderAdd: invalid port!!!\n")); return; } if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { DM_DBG3(("dmDiscoveringExpanderAdd: UPSTREAM\n")); } else if 
(onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG3(("dmDiscoveringExpanderAdd: DOWNSTREAM\n")); } else { DM_DBG3(("dmDiscoveringExpanderAdd: status %d\n", onePortContext->discovery.status)); } if ( oneExpander->underDiscovering == agFALSE) { DM_DBG3(("dmDiscoveringExpanderAdd: ADDED \n")); oneExpander->underDiscovering = agTRUE; tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_ENQUEUE_AT_TAIL(&(oneExpander->linkNode), &(onePortContext->discovery.discoveringExpanderList)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } return; } osGLOBAL dmExpander_t * dmFindConfigurableExp( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander ) { dmExpander_t *tempExpander; dmIntPortContext_t *tmpOnePortContext = onePortContext; dmExpander_t *ret = agNULL; DM_DBG3(("dmFindConfigurableExp: start\n")); if (oneExpander == agNULL) { DM_DBG3(("dmFindConfigurableExp: NULL expander\n")); return agNULL; } DM_DBG3(("dmFindConfigurableExp: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindConfigurableExp: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList))) { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); DM_DBG3(("dmFindConfigurableExp: empty UpdiscoveringExpanderList\n")); return agNULL; } else { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } tempExpander = oneExpander->dmUpStreamExpander; while (tempExpander) { DM_DBG3(("dmFindConfigurableExp: loop exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindConfigurableExp: loop exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo)); if (tempExpander->configRouteTable) { DM_DBG3(("dmFindConfigurableExp: found configurable expander\n")); ret = tempExpander; break; } tempExpander = tempExpander->dmUpStreamExpander; } return ret; } 
osGLOBAL bit32 dmDuplicateConfigSASAddr( dmRoot_t *dmRoot, dmExpander_t *oneExpander, bit32 configSASAddressHi, bit32 configSASAddressLo ) { bit32 i; bit32 ret = agFALSE; DM_DBG3(("dmDuplicateConfigSASAddr: start\n")); if (oneExpander == agNULL) { DM_DBG3(("dmDuplicateConfigSASAddr: NULL expander\n")); return agTRUE; } if (oneExpander->dmDevice->SASAddressID.sasAddressHi == configSASAddressHi && oneExpander->dmDevice->SASAddressID.sasAddressLo == configSASAddressLo ) { DM_DBG3(("dmDuplicateConfigSASAddr: unnecessary\n")); return agTRUE; } DM_DBG3(("dmDuplicateConfigSASAddr: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmDuplicateConfigSASAddr: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG3(("dmDuplicateConfigSASAddr: configsasAddressHi 0x%08x\n", configSASAddressHi)); DM_DBG3(("dmDuplicateConfigSASAddr: configsasAddressLo 0x%08x\n", configSASAddressLo)); DM_DBG3(("dmDuplicateConfigSASAddr: configSASAddrTableIndex %d\n", oneExpander->configSASAddrTableIndex)); for(i=0;iconfigSASAddrTableIndex;i++) { if (oneExpander->configSASAddressHiTable[i] == configSASAddressHi && oneExpander->configSASAddressLoTable[i] == configSASAddressLo ) { DM_DBG3(("dmDuplicateConfigSASAddr: FOUND\n")); ret = agTRUE; break; } } /* new one; let's add it */ if (ret == agFALSE) { DM_DBG3(("dmDuplicateConfigSASAddr: adding configSAS Addr\n")); DM_DBG3(("dmDuplicateConfigSASAddr: configSASAddrTableIndex %d\n", oneExpander->configSASAddrTableIndex)); oneExpander->configSASAddressHiTable[oneExpander->configSASAddrTableIndex] = configSASAddressHi; oneExpander->configSASAddressLoTable[oneExpander->configSASAddrTableIndex] = configSASAddressLo; oneExpander->configSASAddrTableIndex++; } return ret; } osGLOBAL bit16 dmFindCurrentDownStreamPhyIndex( dmRoot_t *dmRoot, dmExpander_t *oneExpander ) { dmExpander_t *DownStreamExpander; bit16 index = 0; bit16 i; bit8 phyId = 0; DM_DBG3(("dmFindCurrentDownStreamPhyIndex: start\n")); if 
(oneExpander == agNULL) { DM_DBG1(("dmFindCurrentDownStreamPhyIndex: wrong, oneExpander is NULL!!!\n")); return 0; } DownStreamExpander = oneExpander->dmCurrentDownStreamExpander; if (DownStreamExpander == agNULL) { DM_DBG1(("dmFindCurrentDownStreamPhyIndex: wrong, DownStreamExpander is NULL!!!\n")); return 0; } DM_DBG3(("dmFindCurrentDownStreamPhyIndex: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindCurrentDownStreamPhyIndex: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG3(("dmFindCurrentDownStreamPhyIndex: downstream exp addrHi 0x%08x\n", DownStreamExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindCurrentDownStreamPhyIndex: downstream exp addrLo 0x%08x\n", DownStreamExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG3(("dmFindCurrentDownStreamPhyIndex: numOfDownStreamPhys %d\n", oneExpander->numOfDownStreamPhys)); phyId = DownStreamExpander->upStreamPhys[0]; DM_DBG3(("dmFindCurrentDownStreamPhyIndex: phyId %d\n", phyId)); for (i=0; inumOfDownStreamPhys;i++) { if (oneExpander->downStreamPhys[i] == phyId) { index = i; break; } } DM_DBG3(("dmFindCurrentDownStreamPhyIndex: index %d\n", index)); return index; } osGLOBAL bit32 dmFindDiscoveringExpander( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander ) { dmList_t *ExpanderList; dmExpander_t *tempExpander; dmIntPortContext_t *tmpOnePortContext = onePortContext; bit32 ret = agFALSE; DM_DBG3(("dmFindDiscoveringExpander: start\n")); DM_DBG3(("dmFindDiscoveringExpander: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindDiscoveringExpander: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList))) { DM_DBG3(("dmFindDiscoveringExpander: empty discoveringExpanderList\n")); return ret; } ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink; while 
(ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList)) { tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if (tempExpander == oneExpander) { if (tempExpander != agNULL) { DM_DBG3(("dmFindDiscoveringExpander: match, expander id %d\n", tempExpander->id)); DM_DBG3(("dmFindDiscoveringExpander: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmFindDiscoveringExpander: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo)); } ret = agTRUE; break; } ExpanderList = ExpanderList->flink; } return ret; } osGLOBAL void dmDiscoveringExpanderRemove( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; DM_DBG3(("dmDiscoveringExpanderRemove: start\n")); DM_DBG3(("dmDiscoveringExpanderRemove: expander id %d\n", oneExpander->id)); DM_DBG3(("dmDiscoveringExpanderRemove: exp addrHi 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmDiscoveringExpanderRemove: exp addrLo 0x%08x\n", oneExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG3(("dmDiscoveringExpanderRemove: BEFORE\n")); dmDumpAllExp(dmRoot, onePortContext, oneExpander); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); dmDumpAllFreeExp(dmRoot); // if is temporary till smp problem is fixed if (dmFindDiscoveringExpander(dmRoot, onePortContext, oneExpander) == agTRUE) { DM_DBG3(("dmDiscoveringExpanderRemove: oneDeviceData %p did %d\n", oneExpander->dmDevice, oneExpander->dmDevice->id)); DM_DBG3(("dmDiscoveringExpanderRemove: oneExpander %p did %d\n", oneExpander, oneExpander->id)); if (oneExpander != oneExpander->dmDevice->dmExpander) { DM_DBG3(("dmDiscoveringExpanderRemove: before !!! 
wrong !!!\n")); } oneExpander->underDiscovering = agFALSE; oneExpander->discoveringPhyId = 0; tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_DEQUEUE_THIS(&(oneExpander->linkNode)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { DM_DBG3(("dmDiscoveringExpanderRemove: DISCOVERY_UP_STREAM\n")); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_ENQUEUE_AT_TAIL(&(oneExpander->upNode), &(onePortContext->discovery.UpdiscoveringExpanderList)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); onePortContext->discovery.NumOfUpExp++; } else { DM_DBG3(("dmDiscoveringExpanderRemove: Status %d\n", onePortContext->discovery.status)); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_ENQUEUE_AT_TAIL(&(oneExpander->linkNode), &(dmAllShared->mainExpanderList)); // DMLIST_ENQUEUE_AT_TAIL(&(oneExpander->linkNode), &(dmAllShared->freeExpanderList)); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } // error checking if (oneExpander != oneExpander->dmDevice->dmExpander) { DM_DBG3(("dmDiscoveringExpanderRemove: after !!! wrong !!!\n")); } } //end temp if else { DM_DBG1(("dmDiscoveringExpanderRemove: !!! 
problem !!!\n")); } DM_DBG3(("dmDiscoveringExpanderRemove: AFTER\n")); dmDumpAllExp(dmRoot, onePortContext, oneExpander); dmDumpAllUpExp(dmRoot, onePortContext, oneExpander); dmDumpAllFreeExp(dmRoot); return; } /* returns an expander with sasAddrLo, sasAddrHi from dmAllShared->mainExpanderList */ osGLOBAL dmExpander_t * dmExpMainListFind( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, bit32 sasAddrHi, bit32 sasAddrLo ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmList_t *ExpanderList; dmExpander_t *tempExpander; DM_DBG3(("dmExpMainListFind: start\n")); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); if (DMLIST_EMPTY(&(dmAllShared->mainExpanderList))) { DM_DBG1(("dmExpMainListFind: empty mainExpanderList\n")); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); return agNULL; } else { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } ExpanderList = dmAllShared->mainExpanderList.flink; while (ExpanderList != &(dmAllShared->mainExpanderList)) { tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if (tempExpander == agNULL) { DM_DBG1(("dmExpMainListFind: tempExpander is NULL!!!\n")); return agNULL; } DM_DBG3(("dmExpMainListFind: expander id %d\n", tempExpander->id)); DM_DBG3(("dmExpMainListFind: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmExpMainListFind: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo)); if ((tempExpander->dmDevice->SASAddressID.sasAddressHi == sasAddrHi) && (tempExpander->dmDevice->SASAddressID.sasAddressLo == sasAddrLo) && (tempExpander->dmDevice->dmPortContext == onePortContext) ) { DM_DBG3(("dmExpMainListFind: found expander id %d\n", tempExpander->id)); DM_DBG3(("dmExpMainListFind: found exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmExpMainListFind: found exp addrLo 0x%08x\n", 
tempExpander->dmDevice->SASAddressID.sasAddressLo)); return tempExpander; } ExpanderList = ExpanderList->flink; } return agNULL; } /* returns an expander with sasAddrLo, sasAddrHi from discoveringExpanderList */ osGLOBAL dmExpander_t * dmExpFind( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, bit32 sasAddrHi, bit32 sasAddrLo ) { dmList_t *ExpanderList; dmExpander_t *tempExpander; dmIntPortContext_t *tmpOnePortContext = onePortContext; DM_DBG3(("dmExpFind: start\n")); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList))) { DM_DBG3(("dmExpFind tdsaDumpAllExp: empty discoveringExpanderList\n")); tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); return agNULL; } else { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink; while (ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList)) { tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if (tempExpander == agNULL) { DM_DBG1(("dmExpFind: tempExpander is NULL!!!\n")); return agNULL; } DM_DBG3(("dmExpFind: expander id %d\n", tempExpander->id)); DM_DBG3(("dmExpFind: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmExpFind: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo)); if ((tempExpander->dmDevice->SASAddressID.sasAddressHi == sasAddrHi) && (tempExpander->dmDevice->SASAddressID.sasAddressLo == sasAddrLo) && (tempExpander->dmDevice->dmPortContext == onePortContext) ) { DM_DBG3(("dmExpFind: found\n")); return tempExpander; } ExpanderList = ExpanderList->flink; } return agNULL; } osGLOBAL bit32 dmDiscoverCheck( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext ) { DM_DBG3(("dmDiscoverCheck: start\n")); if (onePortContext == agNULL) { DM_DBG1(("dmDiscoverCheck: onePortContext is NULL!!!\n")); return agTRUE; } if (onePortContext->valid == agFALSE) { 
DM_DBG1(("dmDiscoverCheck: invalid port!!!\n")); return agTRUE; } if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED || onePortContext->discovery.status == DISCOVERY_SAS_DONE ) { DM_DBG1(("dmDiscoverCheck: aborted discovery!!!\n")); tddmDiscoverCB( dmRoot, onePortContext->dmPortContext, dmDiscAborted ); return agTRUE; } return agFALSE; } /* ??? needs to handle pending SMPs move from dmAllShared->discoveringExpanderList to dmAllShared->mainExpanderList */ osGLOBAL void dmDiscoverAbort( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext ) { DM_DBG1(("dmDiscoverAbort: start\n")); if (onePortContext->DiscoveryState == DM_DSTATE_COMPLETED || onePortContext->discovery.status == DISCOVERY_SAS_DONE) { DM_DBG1(("dmDiscoverAbort: not allowed case!!! onePortContext->DiscoveryState 0x%x onePortContext->discovery.status 0x%x\n", onePortContext->DiscoveryState, onePortContext->discovery.status)); return; } onePortContext->DiscoveryState = DM_DSTATE_COMPLETED; onePortContext->discovery.status = DISCOVERY_SAS_DONE; /* move from dmAllShared->discoveringExpanderList to dmAllShared->mainExpanderList */ dmCleanAllExp(dmRoot, onePortContext); return; } /* move from dmAllShared->discoveringExpanderList to dmAllShared->mainExpanderList */ osGLOBAL void dmCleanAllExp( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmList_t *ExpanderList; dmExpander_t *tempExpander; dmExpander_t *oneExpander = agNULL; dmIntPortContext_t *tmpOnePortContext = onePortContext; DM_DBG3(("dmCleanAllExp: start\n")); DM_DBG3(("dmCleanAllExp: pid %d\n", onePortContext->id)); DM_DBG3(("dmCleanAllExp: before all clean up\n")); dmDumpAllFreeExp(dmRoot); /* clean up UpdiscoveringExpanderList*/ DM_DBG3(("dmCleanAllExp: clean discoveringExpanderList\n")); if (!DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList))) { ExpanderList = 
tmpOnePortContext->discovery.discoveringExpanderList.flink; while (ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList)) { tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList); if (tempExpander == agNULL) { DM_DBG1(("dmCleanAllExp: tempExpander is NULL!!!\n")); return; } DM_DBG3(("dmCleanAllExp: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmCleanAllExp: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG3(("dmCleanAllExp: exp id %d\n", tempExpander->id)); oneExpander = dmExpMainListFind(dmRoot, tmpOnePortContext, tempExpander->dmDevice->SASAddressID.sasAddressHi, tempExpander->dmDevice->SASAddressID.sasAddressLo); if (oneExpander == agNULL) { DM_DBG3(("dmCleanAllExp: moving\n")); DM_DBG3(("dmCleanAllExp: moving, exp id %d\n", tempExpander->id)); /* putting back to the free pool */ tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_DEQUEUE_THIS(&(tempExpander->linkNode)); // DMLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(dmAllShared->freeExpanderList)); DMLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(dmAllShared->mainExpanderList)); if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList))) { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); break; } else { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink; } else { DM_DBG3(("dmCleanAllExp: in mainExpanderList; skippig\n")); ExpanderList = ExpanderList->flink; } } } else { DM_DBG3(("dmCleanAllExp: empty discoveringExpanderList\n")); } /* reset discoveringExpanderList */ DMLIST_INIT_HDR(&(tmpOnePortContext->discovery.discoveringExpanderList)); /* clean up UpdiscoveringExpanderList*/ DM_DBG3(("dmCleanAllExp: clean UpdiscoveringExpanderList\n")); if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.UpdiscoveringExpanderList))) { DM_DBG3(("dmCleanAllExp: empty UpdiscoveringExpanderList\n")); return; } 
ExpanderList = tmpOnePortContext->discovery.UpdiscoveringExpanderList.flink; while (ExpanderList != &(tmpOnePortContext->discovery.UpdiscoveringExpanderList)) { tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, upNode, ExpanderList); if (tempExpander == agNULL) { DM_DBG1(("dmCleanAllExp: tempExpander is NULL!!!\n")); return; } DM_DBG3(("dmCleanAllExp: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi)); DM_DBG3(("dmCleanAllExp: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo)); DM_DBG3(("dmCleanAllExp: exp id %d\n", tempExpander->id)); oneExpander = dmExpMainListFind(dmRoot, tmpOnePortContext, tempExpander->dmDevice->SASAddressID.sasAddressHi, tempExpander->dmDevice->SASAddressID.sasAddressLo); if (oneExpander == agNULL) { DM_DBG3(("dmCleanAllExp: moving\n")); DM_DBG3(("dmCleanAllExp: moving exp id %d\n", tempExpander->id)); tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK); DMLIST_DEQUEUE_THIS(&(tempExpander->upNode)); DMLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(dmAllShared->mainExpanderList)); if (DMLIST_EMPTY(&(tmpOnePortContext->discovery.UpdiscoveringExpanderList))) { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); break; } else { tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK); } ExpanderList = tmpOnePortContext->discovery.UpdiscoveringExpanderList.flink; } else { DM_DBG3(("dmCleanAllExp: in mainExpanderList; skippig\n")); ExpanderList = ExpanderList->flink; } } /* reset UpdiscoveringExpanderList */ DMLIST_INIT_HDR(&(tmpOnePortContext->discovery.UpdiscoveringExpanderList)); DM_DBG3(("dmCleanAllExp: after all clean up\n")); dmDumpAllFreeExp(dmRoot); return; } osGLOBAL void dmInternalRemovals( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDeviceData_t *oneDeviceData = agNULL; dmList_t *DeviceListList; DM_DBG3(("dmInternalRemovals: start\n")); 
tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK); if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList))) { tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK); DM_DBG3(("dmInternalRemovals: empty device list\n")); return; } else { tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK); } DeviceListList = dmAllShared->MainDeviceList.flink; while (DeviceListList != &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmInternalRemovals: oneDeviceData is NULL!!!\n")); return; } DM_DBG3(("dmInternalRemovals: loop did %d\n", oneDeviceData->id)); DM_DBG3(("dmInternalRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi)); DM_DBG3(("dmInternalRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo)); DM_DBG3(("dmInternalRemovals: valid %d\n", oneDeviceData->valid)); DM_DBG3(("dmInternalRemovals: valid2 %d\n", oneDeviceData->valid2)); DM_DBG3(("dmInternalRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached)); if ( oneDeviceData->dmPortContext == onePortContext) { DM_DBG3(("dmInternalRemovals: right portcontext pid %d\n", onePortContext->id)); if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_INCREMENTAL_START) { DM_DBG3(("dmInternalRemovals: incremental discovery\n")); oneDeviceData->valid2 = agFALSE; } else { DM_DBG3(("dmInternalRemovals: full discovery\n")); oneDeviceData->valid = agFALSE; } DeviceListList = DeviceListList->flink; } else { if (oneDeviceData->dmPortContext != agNULL) { DM_DBG3(("dmInternalRemovals: different portcontext; oneDeviceData->dmPortContext pid %d oneportcontext pid %d\n", oneDeviceData->dmPortContext->id, onePortContext->id)); } else { DM_DBG3(("dmInternalRemovals: different portcontext; oneDeviceData->dmPortContext pid NULL oneportcontext pid %d\n", onePortContext->id)); } DeviceListList = DeviceListList->flink; } } return; } osGLOBAL void dmDiscoveryResetProcessed( dmRoot_t *dmRoot, dmIntPortContext_t 
*onePortContext ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDeviceData_t *oneDeviceData = agNULL; dmList_t *DeviceListList; DM_DBG3(("dmDiscoveryResetProcessed: start\n")); /* reinitialize the device data belonging to this portcontext */ DeviceListList = dmAllShared->MainDeviceList.flink; while (DeviceListList != &(dmAllShared->MainDeviceList)) { oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList); if (oneDeviceData == agNULL) { DM_DBG1(("dmDiscoveryResetProcessed: oneDeviceData is NULL!!!\n")); return; } DM_DBG3(("dmDiscoveryResetProcessed: loop did %d\n", oneDeviceData->id)); if (oneDeviceData->dmPortContext == onePortContext) { DM_DBG3(("dmDiscoveryResetProcessed: resetting procssed flag\n")); oneDeviceData->processed = agFALSE; } DeviceListList = DeviceListList->flink; } return; } /* calls osGLOBAL void tddmDiscoverCB( dmRoot_t *dmRoot, dmPortContext_t *dmPortContext, bit32 eventStatus ) */ osGLOBAL void dmDiscoverDone( dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, bit32 flag ) { DM_DBG3(("dmDiscoverDone: start\n")); DM_DBG3(("dmDiscoverDone: pid %d\n", onePortContext->id)); /* Set discovery status */ onePortContext->discovery.status = DISCOVERY_SAS_DONE; /* clean up expanders data strucures; move to free exp when device is cleaned */ dmCleanAllExp(dmRoot, onePortContext); dmDumpAllMainExp(dmRoot, onePortContext); dmDiscoveryResetProcessed(dmRoot, onePortContext); dmDiscoveryDumpMCN(dmRoot, onePortContext); if (onePortContext->discovery.SeenBC == agTRUE) { DM_DBG3(("dmDiscoverDone: broadcast change; discover again\n")); dmDiscoveryResetMCN(dmRoot, onePortContext); dmInternalRemovals(dmRoot, onePortContext); /* processed broadcast change */ onePortContext->discovery.SeenBC = agFALSE; if (onePortContext->discovery.ResetTriggerred == agTRUE) { DM_DBG3(("dmDiscoverDone: dmBCTimer\n")); dmBCTimer(dmRoot, onePortContext); } else { 
    /* (continuation of dmDiscoverDone, begun above this chunk)
       ResetTriggerred path: re-run discovery incrementally */
    dmIncrementalDiscover(dmRoot, onePortContext, agTRUE);
    }
  }
  else
  {
    /* discovery finished (success or failure); decide what to report to TDM */
    onePortContext->DiscoveryState = DM_DSTATE_COMPLETED;
    if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_FULL_START)
    {
      if (flag == DM_RC_SUCCESS)
      {
        dmResetReported(dmRoot, onePortContext );
        dmDiscoveryReportMCN(dmRoot, onePortContext );
        /* call tddmDiscoverCB() */
        tddmDiscoverCB(
                       dmRoot,
                       onePortContext->dmPortContext,
                       dmDiscCompleted
                      );
      }
      else if (flag != DM_RC_SUCCESS || onePortContext->discovery.DeferredError == agTRUE)
      {
        /* full discovery failed: drop the stale device view and tell TDM */
        onePortContext->DiscoveryState = DM_DSTATE_COMPLETED_WITH_FAILURE;
        DM_DBG1(("dmDiscoverDone: Error; clean up!!!\n"));
        dmDiscoveryInvalidateDevices(dmRoot, onePortContext );
        tddmDiscoverCB(
                       dmRoot,
                       onePortContext->dmPortContext,
                       dmDiscFailed
                      );
      }
    }
    else
    {
      /* incremental discovery */
      if (flag == DM_RC_SUCCESS)
      {
        dmReportChanges(dmRoot, onePortContext );
        dmDiscoveryReportMCN(dmRoot, onePortContext );
        tddmDiscoverCB(
                       dmRoot,
                       onePortContext->dmPortContext,
                       dmDiscCompleted
                      );
      }
      else if (flag != DM_RC_SUCCESS || onePortContext->discovery.DeferredError == agTRUE)
      {
        onePortContext->DiscoveryState = DM_DSTATE_COMPLETED_WITH_FAILURE;
        dmDiscoveryInvalidateDevices(dmRoot, onePortContext );
        tddmDiscoverCB(
                       dmRoot,
                       onePortContext->dmPortContext,
                       dmDiscFailed
                      );
      }
    }
  }
  return;
}

/* called by dmDiscoveryErrorRemovals() or dmReportRemovals() on discovery failure */
/*
 * dmSubReportRemovals()
 *
 * Reports one device to the TDM layer via tddmReportDevice() with the given
 * flag.  On a real removal (flag == dmDeviceRemoval) the device is first
 * marked unregistered.  If the device sits behind an expander, that
 * expander's device info is passed along as the second info argument.
 * Always clears oneDeviceData->reported so the next discovery cycle starts
 * fresh.
 */
osGLOBAL void
dmSubReportRemovals(
                    dmRoot_t            *dmRoot,
                    dmIntPortContext_t  *onePortContext,
                    dmDeviceData_t      *oneDeviceData,
                    bit32               flag
                   )
{
  dmDeviceData_t *oneAttachedExpDeviceData = agNULL;

  DM_DBG3(("dmSubReportRemovals: start\n"));
  DM_DBG3(("dmSubReportRemovals: flag 0x%x\n", flag));

  if (flag == dmDeviceRemoval)
  {
    /* device is going away; it is no longer registered */
    oneDeviceData->registered = agFALSE;
  }

  if (oneDeviceData->ExpDevice != agNULL)
  {
    DM_DBG3(("dmSubReportRemovals: attached expander case\n"));
    oneAttachedExpDeviceData = oneDeviceData->ExpDevice;
    tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, &oneAttachedExpDeviceData->dmDeviceInfo, flag);
  }
  else
  {
    DM_DBG3(("dmSubReportRemovals: NO attached expander case\n"));
    tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, agNULL, flag);
  }

  /* this function is called at the end of discovery; reinitializes oneDeviceData->reported */
  oneDeviceData->reported = agFALSE;

  return;
}

/* called by dmReportChanges() on discovery success */
/*
 * dmSubReportChanges()
 *
 * Reports one changed device (arrival / removal / no-change per 'flag') to
 * the TDM layer, but only if it has not already been reported in this
 * discovery cycle (oneDeviceData->reported guard).  Clears 'reported' at
 * the end for the next cycle.
 */
osGLOBAL void
dmSubReportChanges(
                   dmRoot_t            *dmRoot,
                   dmIntPortContext_t  *onePortContext,
                   dmDeviceData_t      *oneDeviceData,
                   bit32               flag
                  )
{
  dmDeviceData_t *oneAttachedExpDeviceData = agNULL;

  DM_DBG3(("dmSubReportChanges: start\n"));
  DM_DBG3(("dmSubReportChanges: flag 0x%x\n", flag));

  if (flag == dmDeviceRemoval)
  {
    oneDeviceData->registered = agFALSE;
  }
  if (oneDeviceData->reported == agFALSE)
  {
    if (oneDeviceData->ExpDevice != agNULL)
    {
      DM_DBG3(("dmSubReportChanges: attached expander case\n"));
      oneAttachedExpDeviceData = oneDeviceData->ExpDevice;
      tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, &oneAttachedExpDeviceData->dmDeviceInfo, flag);
    }
    else
    {
      DM_DBG3(("dmSubReportChanges: NO attached expander case\n"));
      tddmReportDevice(dmRoot, onePortContext->dmPortContext, &oneDeviceData->dmDeviceInfo, agNULL, flag);
    }
  }
  else
  {
    DM_DBG3(("dmSubReportChanges: skip; been reported\n"));
  }

  /* this function is called at the end of discovery; reinitializes oneDeviceData->reported */
  oneDeviceData->reported = agFALSE;

  return;
}

/*
 should add or remove be reported per device???
*/
/*
 * dmReportChanges()
 *
 * After a successful incremental discovery, walks the global MainDeviceList
 * and classifies every device on this port by its (valid, valid2) flags:
 *   valid && valid2   -> unchanged       (dmDeviceNoChange)
 *   valid && !valid2  -> disappeared     (dmDeviceRemoval, RegisteredDevNums--)
 *   !valid && valid2  -> newly arrived   (dmDeviceArrival)
 * The device matching the port's remote SAS address is kept without report.
 * In every case valid <- valid2 and valid2 is cleared for the next cycle.
 * NOTE(review): DM_DEVICE_LOCK is dropped before the traversal — presumably
 * the caller's context serializes list mutation; verify against the other
 * traversals in this file.
 */
osGLOBAL void
dmReportChanges(
                dmRoot_t            *dmRoot,
                dmIntPortContext_t  *onePortContext
               )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;
  bit32             added = agFALSE, removed = agFALSE;
//  dmDeviceData_t    *oneAttachedExpDeviceData = agNULL;

  DM_DBG3(("dmReportChanges: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DM_DBG3(("dmReportChanges: empty device list\n"));
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
  }
  DeviceListList = dmAllShared->MainDeviceList.flink;

  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmReportChanges: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmReportChanges: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    DM_DBG3(("dmReportChanges: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    if ( oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG3(("dmReportChanges: right portcontext\n"));
      /* the directly attached remote device is never reported */
      if (oneDeviceData->SASAddressID.sasAddressHi == onePortContext->sasRemoteAddressHi &&
          oneDeviceData->SASAddressID.sasAddressLo == onePortContext->sasRemoteAddressLo
         )
      {
        DM_DBG1(("dmReportChanges: keep, not reporting did 0x%x\n", oneDeviceData->id));
        oneDeviceData->valid = agTRUE;
        oneDeviceData->valid2 = agFALSE;
      }
      else if ( (oneDeviceData->valid == agTRUE) && (oneDeviceData->valid2 == agTRUE) )
      {
        DM_DBG3(("dmReportChanges: same\n"));
        /* reset valid bit */
        oneDeviceData->valid = oneDeviceData->valid2;
        oneDeviceData->valid2 = agFALSE;
        dmSubReportChanges(dmRoot, onePortContext, oneDeviceData, dmDeviceNoChange);
      }
      else if ( (oneDeviceData->valid == agTRUE) && (oneDeviceData->valid2 == agFALSE) )
      {
        DM_DBG3(("dmReportChanges: removed\n"));
        removed = agTRUE;
        /* reset valid bit */
        oneDeviceData->valid = oneDeviceData->valid2;
        oneDeviceData->valid2 = agFALSE;
        onePortContext->RegisteredDevNums--;
        dmSubReportChanges(dmRoot, onePortContext, oneDeviceData, dmDeviceRemoval);
      }
      else if ( (oneDeviceData->valid == agFALSE) && (oneDeviceData->valid2 == agTRUE) )
      {
        DM_DBG3(("dmReportChanges: added\n"));
        added = agTRUE;
        /* reset valid bit */
        oneDeviceData->valid = oneDeviceData->valid2;
        oneDeviceData->valid2 = agFALSE;
        dmSubReportChanges(dmRoot, onePortContext, oneDeviceData, dmDeviceArrival);
      }
      else
      {
        DM_DBG3(("dmReportChanges: else\n"));
      }
    }
    else
    {
      DM_DBG3(("dmReportChanges: different portcontext\n"));
    }
    DeviceListList = DeviceListList->flink;
  }
  /*
  osGLOBAL void
  tddmReportDevice(
                   dmRoot_t        *dmRoot,
                   dmPortContext_t *dmPortContext,
                   dmDeviceInfo_t  *dmDeviceInfo,
                   dmDeviceInfo_t  *dmExpDeviceInfo,
                   bit32           flag
                  )
  */
  /* arrival or removal at once */
  if (added == agTRUE)
  {
    DM_DBG3(("dmReportChanges: added at the end\n"));
#if 0 /* TBD */
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDeviceChange,
                       tiDeviceArrival,
                       agNULL
                       );
#endif
  }
  if (removed == agTRUE)
  {
    DM_DBG3(("dmReportChanges: removed at the end\n"));
#if 0 /* TBD */
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDeviceChange,
                       tiDeviceRemoval,
                       agNULL
                       );
#endif
  }

  if (onePortContext->discovery.forcedOK == agTRUE && added == agFALSE && removed == agFALSE)
  {
    DM_DBG3(("dmReportChanges: missed chance to report. forced to report OK\n"));
    onePortContext->discovery.forcedOK = agFALSE;
#if 0 /* TBD */
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDiscovery,
                       tiDiscOK,
                       agNULL
                       );
#endif
  }

  if (added == agFALSE && removed == agFALSE)
  {
    DM_DBG3(("dmReportChanges: the same\n"));
  }

  return;
}

/*
 * dmReportRemovals()
 *
 * On discovery/port failure, walks the global MainDeviceList and reports a
 * removal for every still-valid target on this port (the directly attached
 * remote device is kept).  'removed' is only raised for SSP/STP/SATA targets,
 * i.e. expanders are removed silently.  When flag == agTRUE (port-invalid
 * case) the device is additionally detached from its port context.
 */
osGLOBAL void
dmReportRemovals(
                 dmRoot_t            *dmRoot,
                 dmIntPortContext_t  *onePortContext,
                 bit32               flag
                )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;
  bit32             removed = agFALSE;

  DM_DBG1(("dmReportRemovals: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DM_DBG3(("dmReportRemovals: empty device list\n"));
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
  }
  DeviceListList = dmAllShared->MainDeviceList.flink;

  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmReportRemovals: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmReportRemovals: loop did %d\n", oneDeviceData->id));
    DM_DBG3(("dmReportRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    DM_DBG3(("dmReportRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    DM_DBG3(("dmReportRemovals: valid %d\n", oneDeviceData->valid));
    DM_DBG3(("dmReportRemovals: valid2 %d\n", oneDeviceData->valid2));
    DM_DBG3(("dmReportRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached));
    if ( oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG3(("dmReportRemovals: right portcontext pid %d\n", onePortContext->id));
      if (oneDeviceData->SASAddressID.sasAddressHi == onePortContext->sasRemoteAddressHi &&
          oneDeviceData->SASAddressID.sasAddressLo ==
          onePortContext->sasRemoteAddressLo
         )
      {
        /* directly attached remote device is never removed */
        DM_DBG1(("dmReportRemovals: keeping\n"));
        oneDeviceData->valid = agTRUE;
        oneDeviceData->valid2 = agFALSE;
      }
      else if (oneDeviceData->valid == agTRUE)
      {
        DM_DBG3(("dmReportRemovals: removing\n"));

        /* notify only reported devices to OS layer*/
        if ( DEVICE_IS_SSP_TARGET(oneDeviceData) ||
             DEVICE_IS_STP_TARGET(oneDeviceData) ||
             DEVICE_IS_SATA_DEVICE(oneDeviceData)
            )
        {
          removed = agTRUE;
        }

        /* all targets except expanders */
        DM_DBG3(("dmReportRemovals: did %d\n", oneDeviceData->id));
        DM_DBG3(("dmReportRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        DM_DBG3(("dmReportRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        onePortContext->RegisteredDevNums--;
        dmSubReportRemovals(dmRoot, onePortContext, oneDeviceData, dmDeviceRemoval);

        /* reset valid bit */
        oneDeviceData->valid = agFALSE;
        oneDeviceData->valid2 = agFALSE;
      }
      /* called by port invalid case */
      if (flag == agTRUE)
      {
        /* the port is going away; detach the device from it */
        oneDeviceData->dmPortContext = agNULL;
      }
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      if (oneDeviceData->dmPortContext != agNULL)
      {
        DM_DBG3(("dmReportRemovals: different portcontext; oneDeviceData->dmPortContext pid %d oneportcontext pid %d\n", oneDeviceData->dmPortContext->id, onePortContext->id));
      }
      else
      {
        DM_DBG3(("dmReportRemovals: different portcontext; oneDeviceData->dmPortContext pid NULL oneportcontext pid %d\n", onePortContext->id));
      }
      DeviceListList = DeviceListList->flink;
    }
  }

  if (removed == agTRUE)
  {
    DM_DBG3(("dmReportRemovals: removed at the end\n"));
#if 0 /* TBD */
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDeviceChange,
                       tiDeviceRemoval,
                       agNULL
                       );
#endif
  }

  return;
}

/*
 * dmResetReported()
 *
 * Clears the 'reported' flag on every device belonging to this port so that
 * the subsequent report pass (see dmSubReportChanges()) will report each
 * device at most once per discovery cycle.
 */
osGLOBAL void
dmResetReported(
                dmRoot_t            *dmRoot,
                dmIntPortContext_t  *onePortContext
               )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;

  DM_DBG3(("dmResetReported: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DM_DBG3(("dmResetReported: empty device list\n"));
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
  }
  DeviceListList = dmAllShared->MainDeviceList.flink;

  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmResetReported: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmResetReported: loop did %d\n", oneDeviceData->id));
    DM_DBG3(("dmResetReported: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    DM_DBG3(("dmResetReported: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    DM_DBG3(("dmResetReported: valid %d\n", oneDeviceData->valid));
    DM_DBG3(("dmResetReported: valid2 %d\n", oneDeviceData->valid2));
    DM_DBG3(("dmResetReported: directlyAttached %d\n", oneDeviceData->directlyAttached));
    if ( oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG3(("dmResetReported: right portcontext pid %d\n", onePortContext->id));
      oneDeviceData->reported = agFALSE;
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      if (oneDeviceData->dmPortContext != agNULL)
      {
        DM_DBG3(("dmResetReported: different portcontext; oneDeviceData->dmPortContext pid %d oneportcontext pid %d\n", oneDeviceData->dmPortContext->id, onePortContext->id));
      }
      else
      {
        DM_DBG3(("dmResetReported: different portcontext; oneDeviceData->dmPortContext pid NULL oneportcontext pid %d\n", onePortContext->id));
      }
      DeviceListList = DeviceListList->flink;
    }
  }

  return;
}

/* called on discover failure */
/*
 * dmDiscoveryInvalidateDevices()
 *
 * On discovery failure, invalidates (valid/valid2/registered/reported all
 * cleared, RegisteredDevNums--) every device on this port except the
 * directly attached remote device.  Unlike dmDiscoveryErrorRemovals() the
 * removals are NOT reported to the TDM layer.
 */
osGLOBAL void
dmDiscoveryInvalidateDevices(
                             dmRoot_t            *dmRoot,
                             dmIntPortContext_t  *onePortContext
                            )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;

  DM_DBG1(("dmDiscoveryInvalidateDevices: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DM_DBG3(("dmDiscoveryInvalidateDevices: empty device list\n"));
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
  }
  DeviceListList = dmAllShared->MainDeviceList.flink;

  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmDiscoveryInvalidateDevices: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDiscoveryInvalidateDevices: loop did %d\n", oneDeviceData->id));
    DM_DBG3(("dmDiscoveryInvalidateDevices: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    DM_DBG3(("dmDiscoveryInvalidateDevices: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    DM_DBG3(("dmDiscoveryInvalidateDevices: valid %d\n", oneDeviceData->valid));
    DM_DBG3(("dmDiscoveryInvalidateDevices: valid2 %d\n", oneDeviceData->valid2));
    DM_DBG3(("dmDiscoveryInvalidateDevices: directlyAttached %d\n", oneDeviceData->directlyAttached));
    if ( oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG3(("dmDiscoveryInvalidateDevices: right portcontext pid %d\n", onePortContext->id));
      if (oneDeviceData->SASAddressID.sasAddressHi == onePortContext->sasRemoteAddressHi &&
          oneDeviceData->SASAddressID.sasAddressLo == onePortContext->sasRemoteAddressLo
         )
      {
        DM_DBG1(("dmDiscoveryInvalidateDevices: keeping\n"));
        oneDeviceData->valid = agTRUE;
        oneDeviceData->valid2 = agFALSE;
      }
      else
      {
        oneDeviceData->valid = agFALSE;
        oneDeviceData->valid2 = agFALSE;
        oneDeviceData->registered = agFALSE;
        oneDeviceData->reported = agFALSE;
        /* all targets other than expanders */
        DM_DBG3(("dmDiscoveryInvalidateDevices: did %d\n", oneDeviceData->id));
        DM_DBG3(("dmDiscoveryInvalidateDevices: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        DM_DBG3(("dmDiscoveryInvalidateDevices: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        onePortContext->RegisteredDevNums--;
      }
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      if (oneDeviceData->dmPortContext != agNULL)
      {
        DM_DBG3(("dmDiscoveryInvalidateDevices: different portcontext; oneDeviceData->dmPortContext pid %d oneportcontext pid %d\n", oneDeviceData->dmPortContext->id, onePortContext->id));
      }
      else
      {
        DM_DBG3(("dmDiscoveryInvalidateDevices: different portcontext; oneDeviceData->dmPortContext pid NULL oneportcontext pid %d\n", onePortContext->id));
      }
      DeviceListList = DeviceListList->flink;
    }
  }

  return;
}

/*
 should DM report the device removal to TDM on an error case?
 or
 DM simply removes the devices
 For now, the second option.
*/
/*
 * dmDiscoveryErrorRemovals()
 *
 * On discovery error, invalidates every device on this port except the
 * directly attached remote device, and — unlike
 * dmDiscoveryInvalidateDevices() — reports each removal to the TDM layer
 * via dmSubReportRemovals().
 */
osGLOBAL void
dmDiscoveryErrorRemovals(
                         dmRoot_t            *dmRoot,
                         dmIntPortContext_t  *onePortContext
                        )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;

  DM_DBG1(("dmDiscoveryErrorRemovals: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DM_DBG3(("dmDiscoveryErrorRemovals: empty device list\n"));
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
  }
  DeviceListList = dmAllShared->MainDeviceList.flink;

  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmDiscoveryErrorRemovals: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDiscoveryErrorRemovals: loop did %d\n", oneDeviceData->id));
    DM_DBG3(("dmDiscoveryErrorRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    DM_DBG3(("dmDiscoveryErrorRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    DM_DBG3(("dmDiscoveryErrorRemovals: valid %d\n", oneDeviceData->valid));
    DM_DBG3(("dmDiscoveryErrorRemovals: valid2 %d\n", oneDeviceData->valid2));
    DM_DBG3(("dmDiscoveryErrorRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached));
    if ( oneDeviceData->dmPortContext == onePortContext)
    {
      DM_DBG3(("dmDiscoveryErrorRemovals: right portcontext pid %d\n", onePortContext->id));
      if (oneDeviceData->SASAddressID.sasAddressHi == onePortContext->sasRemoteAddressHi &&
          oneDeviceData->SASAddressID.sasAddressLo == onePortContext->sasRemoteAddressLo
         )
      {
        DM_DBG1(("dmDiscoveryErrorRemovals: keeping\n"));
        oneDeviceData->valid = agTRUE;
        oneDeviceData->valid2 = agFALSE;
      }
      else
      {
        oneDeviceData->valid = agFALSE;
        oneDeviceData->valid2 = agFALSE;
        /* all targets other than expanders */
        DM_DBG3(("dmDiscoveryErrorRemovals: did %d\n", oneDeviceData->id));
        DM_DBG3(("dmDiscoveryErrorRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        DM_DBG3(("dmDiscoveryErrorRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        onePortContext->RegisteredDevNums--;
        dmSubReportRemovals(dmRoot, onePortContext, oneDeviceData, dmDeviceRemoval);
      }
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      if (oneDeviceData->dmPortContext != agNULL)
      {
        DM_DBG3(("dmDiscoveryErrorRemovals: different portcontext; oneDeviceData->dmPortContext pid %d oneportcontext pid %d\n", oneDeviceData->dmPortContext->id, onePortContext->id));
      }
      else
      {
        DM_DBG3(("dmDiscoveryErrorRemovals: different portcontext; oneDeviceData->dmPortContext pid NULL oneportcontext pid %d\n", onePortContext->id));
      }
      DeviceListList = DeviceListList->flink;
    }
  }

  return;
}

/* move from dmAllShared->mainExpanderList to dmAllShared->freeExpanderList */
/*
 * dmDiscoveryExpanderCleanUp()
 *
 * Returns every expander that belongs to this port from the in-use
 * mainExpanderList back to the freeExpanderList, reinitializing each one
 * via dmExpanderDeviceDataReInit().  The traversal restarts from the list
 * head after each dequeue because the list was modified.
 */
osGLOBAL void
dmDiscoveryExpanderCleanUp(
                           dmRoot_t            *dmRoot,
                           dmIntPortContext_t  *onePortContext
                          )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmExpander_t      *oneExpander = agNULL;
  dmList_t          *ExpanderList = agNULL;
  dmDeviceData_t    *oneDeviceData = agNULL;

  DM_DBG3(("dmDiscoveryExpanderCleanUp: start\n"));
  /*
    be sure to call
    osGLOBAL void
    dmExpanderDeviceDataReInit(
                               dmRoot_t         *dmRoot,
                               dmExpander_t     *oneExpander
                              );
  */
  tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK);
  if (!DMLIST_EMPTY(&(dmAllShared->mainExpanderList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
    ExpanderList = dmAllShared->mainExpanderList.flink;
    while (ExpanderList != &(dmAllShared->mainExpanderList))
    {
      oneExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList);
      if (oneExpander == agNULL)
      {
        DM_DBG1(("dmDiscoveryExpanderCleanUp: oneExpander is NULL!!!\n"));
        return;
      }
      oneDeviceData = oneExpander->dmDevice;
      DM_DBG3(("dmDiscoveryExpanderCleanUp: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
      DM_DBG3(("dmDiscoveryExpanderCleanUp: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
      if ( oneDeviceData->dmPortContext == onePortContext)
      {
        dmExpanderDeviceDataReInit(dmRoot, oneExpander);
        tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK);
        DMLIST_DEQUEUE_THIS(&(oneExpander->linkNode));
        DMLIST_ENQUEUE_AT_TAIL(&(oneExpander->linkNode), &(dmAllShared->freeExpanderList));

        if (DMLIST_EMPTY(&(dmAllShared->mainExpanderList)))
        {
          tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
          break;
        }
        else
        {
          tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
        }
        /* list changed; restart from the head */
        ExpanderList = dmAllShared->mainExpanderList.flink;
      }
      else
      {
        ExpanderList = ExpanderList->flink;
      }
    }
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
    DM_DBG3(("dmDiscoveryExpanderCleanUp: empty mainExpanderList\n"));
  }

  return;
}

/* moves all devices from dmAllShared->MainDeviceList to dmAllShared->FreeDeviceList */
/*
 * dmDiscoveryDeviceCleanUp()
 *
 * Returns every device owned by this port from MainDeviceList to
 * FreeDeviceList after reinitializing it with dmDeviceDataReInit().
 * NOTE(review): when the dequeue empties the list, the loop breaks BEFORE
 * onePortContext->RegisteredDevNums-- runs, so the last device's removal is
 * not counted — looks unintentional; confirm against callers before fixing.
 */
osGLOBAL void
dmDiscoveryDeviceCleanUp(
                         dmRoot_t            *dmRoot,
                         dmIntPortContext_t  *onePortContext
                        )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;

  DM_DBG3(("dmDiscoveryDeviceCleanUp: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (!DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DeviceListList = dmAllShared->MainDeviceList.flink;
    while (DeviceListList != &(dmAllShared->MainDeviceList))
    {
      oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
      if (oneDeviceData == agNULL)
      {
        DM_DBG1(("dmDiscoveryDeviceCleanUp: oneDeviceData is NULL!!!\n"));
        return;
      }
      DM_DBG3(("dmDiscoveryDeviceCleanUp: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
      DM_DBG3(("dmDiscoveryDeviceCleanUp: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
      if ( oneDeviceData->dmPortContext == onePortContext)
      {
        dmDeviceDataReInit(dmRoot, oneDeviceData);
        tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
        DMLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink));
        DMLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(dmAllShared->FreeDeviceList));

        if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
        {
          tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
          break;
        }
        else
        {
          tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
        }
        onePortContext->RegisteredDevNums--;
        /* list changed; restart from the head */
        DeviceListList = dmAllShared->MainDeviceList.flink;
      }
      else
      {
        DeviceListList = DeviceListList->flink;
      }
    }
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    DM_DBG3(("dmDiscoveryDeviceCleanUp: empty MainDeviceList\n"));
  }

  return;
}

/* debug-only stub; no dump is currently implemented */
osGLOBAL void
dmDumpAllExp(
             dmRoot_t            *dmRoot,
             dmIntPortContext_t  *onePortContext,
             dmExpander_t        *oneExpander
            )
{
  DM_DBG3(("dmDumpAllExp: start\n"));
  return;
}

/* debug-only stub; no dump is currently implemented */
osGLOBAL void
dmDumpAllUpExp(
               dmRoot_t            *dmRoot,
               dmIntPortContext_t  *onePortContext,
               dmExpander_t        *oneExpander
              )
{
  DM_DBG3(("dmDumpAllUpExp: start\n"));
  return;
}

/* debug-only stub; no dump is currently implemented */
osGLOBAL void
dmDumpAllFreeExp(
                 dmRoot_t            *dmRoot
                )
{
  DM_DBG3(("dmDumpAllFreeExp: start\n"));
  return;
}

/*
 * dmDumpAllMainExp()
 *
 * Debug helper: logs every expander on mainExpanderList and highlights the
 * ones belonging to this port.  Read-only; DBG3 output only.
 */
osGLOBAL void
dmDumpAllMainExp(
                 dmRoot_t            *dmRoot,
                 dmIntPortContext_t  *onePortContext
                )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmList_t          *ExpanderList;
  dmExpander_t      *tempExpander;

  DM_DBG3(("dmDumpAllMainExp: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_EXPANDER_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->mainExpanderList)))
  {
    DM_DBG3(("dmDumpAllMainExp: empty discoveringExpanderList\n"));
    tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_EXPANDER_LOCK);
  }

  ExpanderList = dmAllShared->mainExpanderList.flink;
  while (ExpanderList != &(dmAllShared->mainExpanderList))
  {
    tempExpander = DMLIST_OBJECT_BASE(dmExpander_t, linkNode, ExpanderList);
    if (tempExpander == agNULL)
    {
      DM_DBG1(("dmDumpAllMainExp: tempExpander is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDumpAllMainExp: expander id %d\n", tempExpander->id));
    DM_DBG3(("dmDumpAllMainExp: exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi));
    DM_DBG3(("dmDumpAllMainExp: exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo));
    if ((tempExpander->dmDevice->dmPortContext == onePortContext)
       )
    {
      DM_DBG3(("dmDumpAllMainExp: found expander id %d\n", tempExpander->id));
      DM_DBG3(("dmDumpAllMainExp: found exp addrHi 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressHi));
      DM_DBG3(("dmDumpAllMainExp: found exp addrLo 0x%08x\n", tempExpander->dmDevice->SASAddressID.sasAddressLo));
    }
    ExpanderList = ExpanderList->flink;
  }

  return;
}

/*
 * dmDumpAllMainDevice()
 *
 * Debug helper: logs every device on MainDeviceList and counts how many of
 * them belong to this port.  Read-only; DBG3 output only.
 */
osGLOBAL void
dmDumpAllMainDevice(
                    dmRoot_t            *dmRoot,
                    dmIntPortContext_t  *onePortContext
                   )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;
  bit32             total = 0, port_total = 0;

  DM_DBG3(("dmDumpAllMainDevice: start\n"));

  tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
  if (DMLIST_EMPTY(&(dmAllShared->MainDeviceList)))
  {
    DM_DBG3(("dmDumpAllMainDevice: empty discoveringExpanderList\n"));
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    return;
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
  }

  DeviceListList = dmAllShared->MainDeviceList.flink;
  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG3(("dmDumpAllMainDevice: oneDeviceData is NULL!!!\n"));
      return;
    }
    DM_DBG3(("dmDumpAllMainDevice: oneDeviceData id %d\n", oneDeviceData->id));
    DM_DBG3(("dmDumpAllMainDevice: addrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
    DM_DBG3(("dmDumpAllMainDevice: addrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
    total++;
    if ((oneDeviceData->dmPortContext == onePortContext)
       )
    {
      DM_DBG3(("dmDumpAllMainDevice: found oneDeviceData id %d\n", oneDeviceData->id));
      DM_DBG3(("dmDumpAllMainDevice: found addrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
      DM_DBG3(("dmDumpAllMainDevice: found addrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
      port_total++;
    }
    DeviceListList = DeviceListList->flink;
  }

  DM_DBG3(("dmDumpAllMainDevice: total %d port_totaol %d\n", total, port_total));

  return;
}

/*
 * dmAddSASToSharedcontext()
 *
 * Looks up the device identified by dmSASSubID on this port; if not found,
 * allocates one from FreeDeviceList and initializes it.  In both cases the
 * SAS addresses, ssp/stp/smp capability bits, device type, attached
 * expander, and phyID (only if still at its initial 0xFF) are (re)recorded,
 * and the valid/valid2 flags are set according to the discovery phase
 * (valid2 for incremental discovery, valid otherwise).
 * Returns the device data, or agNULL on pool exhaustion / list corruption.
 */
osGLOBAL dmDeviceData_t *
dmAddSASToSharedcontext(
                        dmRoot_t            *dmRoot,
                        dmIntPortContext_t  *onePortContext,
                        dmSASSubID_t        *dmSASSubID,
                        dmDeviceData_t      *oneExpDeviceData,
                        bit8                phyID
                       )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;
  bit32             new_device = agTRUE;

  DM_DBG3(("dmAddSASToSharedcontext: start\n"));
  DM_DBG3(("dmAddSASToSharedcontext: oneportContext ID %d\n", onePortContext->id));

  if (oneExpDeviceData != agNULL)
  {
    DM_DBG3(("dmAddSASToSharedcontext: oneExpDeviceData sasAddressHi 0x%08x sasAddressLo 0x%08x\n",
    oneExpDeviceData->SASAddressID.sasAddressHi, oneExpDeviceData->SASAddressID.sasAddressLo));
  }
  else
  {
    DM_DBG3(("dmAddSASToSharedcontext: oneExpDeviceData is NULL\n"));
  }
  /* find a device's existence */
  DeviceListList = dmAllShared->MainDeviceList.flink;
  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmAddSASToSharedcontext: oneDeviceData is NULL!!!\n"));
      return agNULL;
    }
    /* same SAS address on the same port means the same device */
    if ((oneDeviceData->SASAddressID.sasAddressHi == dmSASSubID->sasAddressHi) &&
        (oneDeviceData->SASAddressID.sasAddressLo == dmSASSubID->sasAddressLo) &&
        (oneDeviceData->dmPortContext == onePortContext)
       )
    {
      DM_DBG3(("dmAddSASToSharedcontext: pid %d did %d\n", onePortContext->id, oneDeviceData->id));
      new_device = agFALSE;
      break;
    }
    DeviceListList = DeviceListList->flink;
  }

  /* new device */
  if (new_device == agTRUE)
  {
    DM_DBG3(("dmAddSASToSharedcontext: new device\n"));
    DM_DBG3(("dmAddSASToSharedcontext: sasAddressHi 0x%08x sasAddressLo 0x%08x\n",
    dmSASSubID->sasAddressHi, dmSASSubID->sasAddressLo));
    tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
    if (!DMLIST_NOT_EMPTY(&(dmAllShared->FreeDeviceList)))
    {
      tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
      DM_DBG1(("dmAddSASToSharedcontext: empty DeviceData FreeLink\n"));
      dmDumpAllMainDevice(dmRoot, onePortContext);
      return agNULL;
    }

    DMLIST_DEQUEUE_FROM_HEAD(&DeviceListList, &(dmAllShared->FreeDeviceList));
    tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, FreeLink, DeviceListList);

    if (oneDeviceData != agNULL)
    {
      DM_DBG3(("dmAddSASToSharedcontext: oneDeviceData %p pid %d did %d\n", oneDeviceData, onePortContext->id, oneDeviceData->id));

      onePortContext->Count++;
      oneDeviceData->dmRoot = dmRoot;
      /* saving sas address */
      oneDeviceData->SASAddressID.sasAddressLo = dmSASSubID->sasAddressLo;
      oneDeviceData->SASAddressID.sasAddressHi = dmSASSubID->sasAddressHi;
      oneDeviceData->initiator_ssp_stp_smp = dmSASSubID->initiator_ssp_stp_smp;
      oneDeviceData->target_ssp_stp_smp = dmSASSubID->target_ssp_stp_smp;
      oneDeviceData->dmPortContext = onePortContext;
      /* handles both SAS target and STP-target, SATA-device */
      if (!DEVICE_IS_SATA_DEVICE(oneDeviceData) && !DEVICE_IS_STP_TARGET(oneDeviceData))
      {
        oneDeviceData->DeviceType = DM_SAS_DEVICE;
      }
      else
      {
        oneDeviceData->DeviceType = DM_SATA_DEVICE;
      }

      if (oneExpDeviceData != agNULL)
      {
        oneDeviceData->ExpDevice = oneExpDeviceData;
      }

      /* set phyID only when it has initial value of 0xFF */
      if (oneDeviceData->phyID == 0xFF)
      {
        oneDeviceData->phyID = phyID;
      }

      /* incremental discovery */
      /* add device to incremental-related link. Report using this link
         when incremental discovery is done */
      if (onePortContext->DiscoveryState == DM_DSTATE_NOT_STARTED)
      {
        DM_DBG3(("dmAddSASToSharedcontext: DM_DSTATE_NOT_STARTED\n"));
        DM_DBG3(("dmAddSASToSharedcontext: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        DM_DBG3(("dmAddSASToSharedcontext: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        oneDeviceData->valid = agTRUE;
      }
      else
      {
        if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_INCREMENTAL_START)
        {
          DM_DBG3(("dmAddSASToSharedcontext: incremental discovery\n"));
          DM_DBG3(("dmAddSASToSharedcontext: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
          DM_DBG3(("dmAddSASToSharedcontext: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
          oneDeviceData->valid2 = agTRUE;
        }
        else
        {
          DM_DBG3(("dmAddSASToSharedcontext: full discovery\n"));
          DM_DBG3(("dmAddSASToSharedcontext: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
          DM_DBG3(("dmAddSASToSharedcontext: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
          oneDeviceData->valid = agTRUE;
        }
      }

      /* add the devicedata to the portcontext */
      tddmSingleThreadedEnter(dmRoot, DM_DEVICE_LOCK);
      DMLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->MainLink), &(dmAllShared->MainDeviceList));
      tddmSingleThreadedLeave(dmRoot, DM_DEVICE_LOCK);
      DM_DBG3(("dmAddSASToSharedcontext: one case pid %d did %d \n", onePortContext->id, oneDeviceData->id));
      DM_DBG3(("dmAddSASToSharedcontext: new case pid %d did %d phyID %d\n", onePortContext->id, oneDeviceData->id, oneDeviceData->phyID));
    }
  }
  else /* old device */
  {
    DM_DBG3(("dmAddSASToSharedcontext: old device\n"));
    DM_DBG3(("dmAddSASToSharedcontext: oneDeviceData %p did %d\n", oneDeviceData, oneDeviceData->id));
    DM_DBG3(("dmAddSASToSharedcontext: sasAddressHi 0x%08x sasAddressLo 0x%08x\n",
    dmSASSubID->sasAddressHi, dmSASSubID->sasAddressLo));

    oneDeviceData->dmRoot = dmRoot;
    /* saving sas address */
    oneDeviceData->SASAddressID.sasAddressLo = dmSASSubID->sasAddressLo;
    oneDeviceData->SASAddressID.sasAddressHi = dmSASSubID->sasAddressHi;
    oneDeviceData->initiator_ssp_stp_smp = dmSASSubID->initiator_ssp_stp_smp;
    oneDeviceData->target_ssp_stp_smp = dmSASSubID->target_ssp_stp_smp;
    oneDeviceData->dmPortContext = onePortContext;
    /* handles both SAS target and STP-target, SATA-device */
    if (!DEVICE_IS_SATA_DEVICE(oneDeviceData) && !DEVICE_IS_STP_TARGET(oneDeviceData))
    {
      oneDeviceData->DeviceType = DM_SAS_DEVICE;
    }
    else
    {
      oneDeviceData->DeviceType = DM_SATA_DEVICE;
    }

    if (oneExpDeviceData != agNULL)
    {
      oneDeviceData->ExpDevice = oneExpDeviceData;
    }

    /* set phyID only when it has initial value of 0xFF */
    if (oneDeviceData->phyID == 0xFF)
    {
      oneDeviceData->phyID = phyID;
    }

    if (onePortContext->DiscoveryState == DM_DSTATE_NOT_STARTED)
    {
      DM_DBG3(("dmAddSASToSharedcontext: DM_DSTATE_NOT_STARTED\n"));
      DM_DBG3(("dmAddSASToSharedcontext: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
      DM_DBG3(("dmAddSASToSharedcontext: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
      oneDeviceData->valid = agTRUE;
    }
    else
    {
      if (onePortContext->discovery.type == DM_DISCOVERY_OPTION_INCREMENTAL_START)
      {
        DM_DBG3(("dmAddSASToSharedcontext: incremental discovery\n"));
        DM_DBG3(("dmAddSASToSharedcontext: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        DM_DBG3(("dmAddSASToSharedcontext: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        oneDeviceData->valid2 = agTRUE;
      }
      else
      {
        DM_DBG3(("dmAddSASToSharedcontext: full discovery\n"));
        DM_DBG3(("dmAddSASToSharedcontext: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        DM_DBG3(("dmAddSASToSharedcontext: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        oneDeviceData->valid = agTRUE;
      }
    }
    DM_DBG3(("dmAddSASToSharedcontext: old case pid %d did %d phyID %d\n", onePortContext->id, oneDeviceData->id, oneDeviceData->phyID));
  }

  return oneDeviceData;
}

/* no checking of valid and valid2 */
/*
 * dmDeviceFind()
 *
 * Linear search of the global MainDeviceList for a device with the given
 * SAS address on the given port.  Deliberately ignores valid/valid2 state
 * (see the commented-out check).  Returns the device or agNULL.
 */
osGLOBAL dmDeviceData_t *
dmDeviceFind(
             dmRoot_t            *dmRoot,
             dmIntPortContext_t  *onePortContext,
             bit32               sasAddrHi,
             bit32               sasAddrLo
            )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDeviceData_t    *oneDeviceData = agNULL;
  dmList_t          *DeviceListList;
  bit32             found = agFALSE;

  DM_DBG3(("dmDeviceFind: start\n"));

  /* find a device's existence */
  DeviceListList = dmAllShared->MainDeviceList.flink;
  while (DeviceListList != &(dmAllShared->MainDeviceList))
  {
    oneDeviceData = DMLIST_OBJECT_BASE(dmDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      DM_DBG1(("dmDeviceFind: oneDeviceData is NULL!!!\n"));
      return agNULL;
    }
    if ((oneDeviceData->SASAddressID.sasAddressHi == sasAddrHi) &&
        (oneDeviceData->SASAddressID.sasAddressLo == sasAddrLo) &&
//        (oneDeviceData->valid == agTRUE) &&
        (oneDeviceData->dmPortContext == onePortContext)
       )
    {
      DM_DBG3(("dmDeviceFind: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id));
      DM_DBG3(("dmDeviceFind: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
      DM_DBG3(("dmDeviceFind: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
      found = agTRUE;
      break;
    }
    DeviceListList = DeviceListList->flink;
  }

  if (found == agFALSE)
  {
    DM_DBG3(("dmDeviceFind: end returning NULL\n"));
    return agNULL;
  }
  else
  {
    DM_DBG3(("dmDeviceFind: end returning NOT NULL\n"));
    return oneDeviceData;
  }
}

/*
 * dmBCTimer()
 *
 * (Re)arms the broadcast-change timer for this port: kills a running
 * instance first, then, if the port is still valid, schedules dmBCTimerCB()
 * to fire after BC_TIMER_VALUE.
 */
osGLOBAL void
dmBCTimer(
          dmRoot_t            *dmRoot,
          dmIntPortContext_t  *onePortContext
         )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDiscovery_t     *discovery;

  DM_DBG3(("dmBCTimer: start\n"));

  discovery = &(onePortContext->discovery);

  tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK);
  if (discovery->BCTimer.timerRunning == agTRUE)
  {
    tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK);
    dmKillTimer(
                dmRoot,
                &discovery->BCTimer
               );
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK);
  }

  if (onePortContext->valid == agTRUE)
  {
    dmSetTimerRequest(
                      dmRoot,
                      &discovery->BCTimer,
                      BC_TIMER_VALUE/dmAllShared->usecsPerTick,
                      dmBCTimerCB,
                      onePortContext,
                      agNULL,
                      agNULL
                     );
    dmAddTimer(
               dmRoot,
               &dmAllShared->timerlist,
               &discovery->BCTimer
              );
  }

  return;
}

/*
 * dmBCTimerCB()
 *
 * Broadcast-change timer expiry: clears ResetTriggerred and, if the port is
 * still valid, kicks off an incremental discovery.
 * timerData1 carries the dmIntPortContext_t; timerData2/3 are unused.
 */
osGLOBAL void
dmBCTimerCB(
            dmRoot_t    * dmRoot,
            void        * timerData1,
            void        * timerData2,
            void        * timerData3
           )
{
  dmIntPortContext_t    *onePortContext;
  dmDiscovery_t         *discovery;

  DM_DBG3(("dmBCTimerCB: start\n"));

  onePortContext = (dmIntPortContext_t *)timerData1;
  discovery = &(onePortContext->discovery);

  tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK);
  if (discovery->BCTimer.timerRunning == agTRUE)
  {
    tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK);
    dmKillTimer(
                dmRoot,
                &discovery->BCTimer
               );
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK);
  }

  discovery->ResetTriggerred = agFALSE;

  if (onePortContext->valid == agTRUE)
  {
    dmDiscover(dmRoot,
               onePortContext->dmPortContext,
               DM_DISCOVERY_OPTION_INCREMENTAL_START
              );
  }

  return;
}

/* discovery related SMP timers */
/*
 * dmDiscoverySMPTimer()
 *
 * Arms the per-discovery SMP application timer for the given SMP request:
 * kills a running instance first, then schedules dmDiscoverySMPTimerCB()
 * after SMP_TIMER_VALUE with the request body as callback data.
 */
osGLOBAL void
dmDiscoverySMPTimer(dmRoot_t            *dmRoot,
                    dmIntPortContext_t  *onePortContext,
                    bit32               functionCode,
                    dmSMPRequestBody_t  *dmSMPRequestBody
                   )
{
  dmIntRoot_t       *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t    *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmDiscovery_t     *discovery;

  DM_DBG3(("dmDiscoverySMPTimer: start\n"));
  DM_DBG3(("dmDiscoverySMPTimer: pid %d SMPFn 0x%x\n", onePortContext->id, functionCode));

  /* start the SMP timer which works as SMP application timer */
  discovery = &(onePortContext->discovery);

  tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK);
  if (discovery->DiscoverySMPTimer.timerRunning == agTRUE)
  {
    tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK);
    dmKillTimer(
                dmRoot,
                &discovery->DiscoverySMPTimer
               );
  }
  else
  {
    tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK);
  }

  dmSetTimerRequest(
                    dmRoot,
                    &discovery->DiscoverySMPTimer,
                    SMP_TIMER_VALUE/dmAllShared->usecsPerTick,
                    dmDiscoverySMPTimerCB,
                    onePortContext,
                    dmSMPRequestBody,
                    agNULL
                   );

  dmAddTimer (
              dmRoot,
              &dmAllShared->timerlist,
              &discovery->DiscoverySMPTimer
              );

  return;
}

/* (dmDiscoverySMPTimerCB continues past this chunk)
   SMP application timer expiry: timerData1 = port context,
   timerData2 = the timed-out SMP request body */
osGLOBAL void
dmDiscoverySMPTimerCB(
                      dmRoot_t    * dmRoot,
                      void        * timerData1,
                      void        * timerData2,
                      void        * timerData3
                     )
{
  agsaRoot_t            *agRoot;
  dmIntPortContext_t    *onePortContext;
  bit8                  SMPFunction;
#ifndef DIRECT_SMP
  dmSMPFrameHeader_t    *dmSMPFrameHeader;
  bit8                  smpHeader[4];
#endif
  dmSMPRequestBody_t    *dmSMPRequestBody;
  dmDiscovery_t         *discovery;
  dmDeviceData_t        *oneDeviceData;
  agsaIORequest_t       *agAbortIORequest = agNULL;
  agsaIORequest_t       *agToBeAbortIORequest = agNULL;
  dmIntRoot_t           *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData;
  dmIntContext_t        *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared;
  dmExpander_t          *oneExpander = agNULL;
  dmSMPRequestBody_t    *dmAbortSMPRequestBody = agNULL;
  dmList_t              *SMPList;

  DM_DBG1(("dmDiscoverySMPTimerCB: start!!!\n"));

  onePortContext = (dmIntPortContext_t *)timerData1;
  dmSMPRequestBody = (dmSMPRequestBody_t *)timerData2;

  discovery = &(onePortContext->discovery);
  oneDeviceData = dmSMPRequestBody->dmDevice;
  agToBeAbortIORequest = &(dmSMPRequestBody->agIORequest);
  agRoot = dmAllShared->agRoot;

#ifdef DIRECT_SMP
  SMPFunction = dmSMPRequestBody->smpPayload[1];
#else
  saFrameReadBlock(agRoot, dmSMPRequestBody->IndirectSMP, 0, smpHeader, 4);
  dmSMPFrameHeader = (dmSMPFrameHeader_t *)smpHeader;
  SMPFunction = dmSMPFrameHeader->smpFunction;
#endif

  DM_DBG3(("dmDiscoverySMPTimerCB: SMP function 0x%x\n", SMPFunction));

  tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK);
  if (discovery->DiscoverySMPTimer.timerRunning == agTRUE)
{ tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->DiscoverySMPTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } //for debugging // saGetPendingPICI(agRoot); switch (SMPFunction) { case SMP_REPORT_GENERAL: /* fall through */ case SMP_DISCOVER: /* fall through */ case SMP_CONFIGURE_ROUTING_INFORMATION: /* fall through */ DM_DBG1(("dmDiscoverySMPTimerCB: failing discovery, SMP function 0x%x !!!\n", SMPFunction)); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; /* no more things to do */ case SMP_REPORT_PHY_SATA: DM_DBG1(("dmDiscoverySMPTimerCB: failing discovery, SMP function SMP_REPORT_PHY_SATA !!!\n")); dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); break; default: /* do nothing */ DM_DBG1(("dmDiscoverySMPTimerCB: Error, not allowed case!!!\n")); break; } if (oneDeviceData->registered == agTRUE && (oneDeviceData->valid == agTRUE || oneDeviceData->valid2 == agTRUE) ) { /* call to saSMPAbort(one) */ /* get an smp REQUEST from the free list */ tddmSingleThreadedEnter(dmRoot, DM_SMP_LOCK); if (DMLIST_EMPTY(&(dmAllShared->freeSMPList))) { DM_DBG1(("dmDiscoverySMPTimerCB: no free SMP, can't abort SMP!!!\n")); tddmSingleThreadedLeave(dmRoot, DM_SMP_LOCK); return; } else { DMLIST_DEQUEUE_FROM_HEAD(&SMPList, &(dmAllShared->freeSMPList)); tddmSingleThreadedLeave(dmRoot, DM_SMP_LOCK); dmAbortSMPRequestBody = DMLIST_OBJECT_BASE(dmSMPRequestBody_t, Link, SMPList); if (dmAbortSMPRequestBody == agNULL) { DM_DBG1(("dmDiscoverySMPTimerCB: dmAbortSMPRequestBody is NULL!!!\n")); return; } DM_DBG5(("dmDiscoverySMPTimerCB: SMP id %d\n", dmAbortSMPRequestBody->id)); } dmAbortSMPRequestBody->dmRoot = dmRoot; agAbortIORequest = &(dmAbortSMPRequestBody->agIORequest); agAbortIORequest->osData = (void *) dmAbortSMPRequestBody; agAbortIORequest->sdkData = agNULL; /* SALL takes care of this */ oneExpander = oneDeviceData->dmExpander; DM_DBG1(("dmDiscoverySMPTimerCB: calling saSMPAbort!!!\n")); saSMPAbort(agRoot, 
agAbortIORequest, 0, oneExpander->agDevHandle, 0, /* abort one */ agToBeAbortIORequest, dmSMPAbortCB ); } return; } osGLOBAL void dmSMPBusyTimer(dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *oneDeviceData, dmSMPRequestBody_t *dmSMPRequestBody ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDiscovery_t *discovery; DM_DBG3(("dmSMPBusyTimer: start\n")); DM_DBG3(("dmSMPBusyTimer: pid %d\n", onePortContext->id)); discovery = &(onePortContext->discovery); tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->SMPBusyTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->SMPBusyTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } dmSetTimerRequest( dmRoot, &discovery->SMPBusyTimer, SMP_BUSY_TIMER_VALUE/dmAllShared->usecsPerTick, dmSMPBusyTimerCB, onePortContext, oneDeviceData, dmSMPRequestBody ); dmAddTimer ( dmRoot, &dmAllShared->timerlist, &discovery->SMPBusyTimer ); return; } osGLOBAL void dmSMPBusyTimerCB( dmRoot_t * dmRoot, void * timerData1, void * timerData2, void * timerData3 ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; agsaRoot_t *agRoot; dmIntPortContext_t *onePortContext; dmDeviceData_t *oneDeviceData; dmSMPRequestBody_t *dmSMPRequestBody; agsaSASRequestBody_t *agSASRequestBody; agsaIORequest_t *agIORequest; agsaDevHandle_t *agDevHandle; dmDiscovery_t *discovery; bit32 status = AGSA_RC_FAILURE; dmExpander_t *oneExpander = agNULL; DM_DBG3(("dmSMPBusyTimerCB: start\n")); onePortContext = (dmIntPortContext_t *)timerData1; oneDeviceData = (dmDeviceData_t *)timerData2; dmSMPRequestBody = (dmSMPRequestBody_t *)timerData3; agRoot = dmAllShared->agRoot; agIORequest = &(dmSMPRequestBody->agIORequest); oneExpander = oneDeviceData->dmExpander; agDevHandle = oneExpander->agDevHandle; 
agSASRequestBody = &(dmSMPRequestBody->agSASRequestBody); discovery = &(onePortContext->discovery); discovery->SMPRetries++; if (discovery->SMPRetries < SMP_BUSY_RETRIES) { status = saSMPStart( agRoot, agIORequest, 0, agDevHandle, AGSA_SMP_INIT_REQ, agSASRequestBody, &dmsaSMPCompleted ); } if (status == AGSA_RC_SUCCESS) { discovery->SMPRetries = 0; tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->SMPBusyTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->SMPBusyTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } } else if (status == AGSA_RC_FAILURE) { tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->SMPBusyTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->SMPBusyTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } discovery->SMPRetries = 0; dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else /* AGSA_RC_BUSY */ { if (discovery->SMPRetries >= SMP_BUSY_RETRIES) { /* done with retris; give up */ DM_DBG3(("dmSMPBusyTimerCB: retries are over\n")); tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->SMPBusyTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->SMPBusyTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } discovery->SMPRetries = 0; dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); } else { /* keep retrying */ dmSMPBusyTimer(dmRoot, onePortContext, oneDeviceData, dmSMPRequestBody); } } return; } /* expander configuring timer */ osGLOBAL void dmDiscoveryConfiguringTimer(dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmDeviceData_t *oneDeviceData ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmDiscovery_t *discovery; DM_DBG3(("dmDiscoveryConfiguringTimer: start\n")); 
DM_DBG3(("dmDiscoveryConfiguringTimer: pid %d\n", onePortContext->id)); discovery = &(onePortContext->discovery); tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->discoveryTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->discoveryTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } DM_DBG3(("dmDiscoveryConfiguringTimer: UsecsPerTick %d\n", dmAllShared->usecsPerTick)); DM_DBG3(("dmDiscoveryConfiguringTimer: Timervalue %d\n", DISCOVERY_CONFIGURING_TIMER_VALUE/dmAllShared->usecsPerTick)); dmSetTimerRequest( dmRoot, &discovery->discoveryTimer, DISCOVERY_CONFIGURING_TIMER_VALUE/dmAllShared->usecsPerTick, dmDiscoveryConfiguringTimerCB, onePortContext, oneDeviceData, agNULL ); dmAddTimer ( dmRoot, &dmAllShared->timerlist, &discovery->discoveryTimer ); return; } osGLOBAL void dmDiscoveryConfiguringTimerCB( dmRoot_t * dmRoot, void * timerData1, void * timerData2, void * timerData3 ) { dmIntPortContext_t *onePortContext = agNULL; dmDiscovery_t *discovery = agNULL; dmDeviceData_t *oneDeviceData = agNULL; onePortContext = (dmIntPortContext_t *)timerData1; oneDeviceData = (dmDeviceData_t *)timerData2; discovery = &(onePortContext->discovery); DM_DBG3(("dmDiscoveryConfiguringTimerCB: start\n")); tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->discoveryTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->discoveryTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } if (oneDeviceData->valid == agTRUE || oneDeviceData->valid2 == agTRUE) { dmReportGeneralSend(dmRoot, oneDeviceData); } return; } osGLOBAL void dmConfigureRouteTimer(dmRoot_t *dmRoot, dmIntPortContext_t *onePortContext, dmExpander_t *oneExpander, smpRespDiscover_t *pdmSMPDiscoverResp, smpRespDiscover2_t *pdmSMPDiscover2Resp ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t 
*)&dmIntRoot->dmAllShared; dmDiscovery_t *discovery; DM_DBG3(("dmConfigureRouteTimer: start\n")); DM_DBG3(("dmConfigureRouteTimer: pid %d\n", onePortContext->id)); discovery = &(onePortContext->discovery); DM_DBG3(("dmConfigureRouteTimer: onePortContext %p oneExpander %p pdmSMPDiscoverResp %p\n", onePortContext, oneExpander, pdmSMPDiscoverResp)); DM_DBG3(("dmConfigureRouteTimer: discovery %p \n", discovery)); DM_DBG3(("dmConfigureRouteTimer: pid %d configureRouteRetries %d\n", onePortContext->id, discovery->configureRouteRetries)); DM_DBG3(("dmConfigureRouteTimer: discovery->status %d\n", discovery->status)); tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->configureRouteTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->configureRouteTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } DM_DBG3(("dmConfigureRouteTimer: UsecsPerTick %d\n", dmAllShared->usecsPerTick)); DM_DBG3(("dmConfigureRouteTimer: Timervalue %d\n", CONFIGURE_ROUTE_TIMER_VALUE/dmAllShared->usecsPerTick)); if (oneExpander->SAS2 == 0) { /* SAS 1.1 */ dmSetTimerRequest( dmRoot, &discovery->configureRouteTimer, CONFIGURE_ROUTE_TIMER_VALUE/dmAllShared->usecsPerTick, dmConfigureRouteTimerCB, (void *)onePortContext, (void *)oneExpander, (void *)pdmSMPDiscoverResp ); } else { /* SAS 2 */ dmSetTimerRequest( dmRoot, &discovery->configureRouteTimer, CONFIGURE_ROUTE_TIMER_VALUE/dmAllShared->usecsPerTick, dmConfigureRouteTimerCB, (void *)onePortContext, (void *)oneExpander, (void *)pdmSMPDiscover2Resp ); } dmAddTimer ( dmRoot, &dmAllShared->timerlist, &discovery->configureRouteTimer ); return; } osGLOBAL void dmConfigureRouteTimerCB( dmRoot_t * dmRoot, void * timerData1, void * timerData2, void * timerData3 ) { dmIntRoot_t *dmIntRoot = (dmIntRoot_t *)dmRoot->dmData; dmIntContext_t *dmAllShared = (dmIntContext_t *)&dmIntRoot->dmAllShared; dmIntPortContext_t *onePortContext; dmExpander_t *oneExpander; smpRespDiscover_t 
*pdmSMPDiscoverResp = agNULL; smpRespDiscover2_t *pdmSMPDiscover2Resp = agNULL; dmDiscovery_t *discovery; DM_DBG3(("dmConfigureRouteTimerCB: start\n")); onePortContext = (dmIntPortContext_t *)timerData1; oneExpander = (dmExpander_t *)timerData2; if (oneExpander->SAS2 == 0) { pdmSMPDiscoverResp = (smpRespDiscover_t *)timerData3; } else { pdmSMPDiscover2Resp = (smpRespDiscover2_t *)timerData3; } discovery = &(onePortContext->discovery); DM_DBG3(("dmConfigureRouteTimerCB: onePortContext %p oneExpander %p pdmSMPDiscoverResp %p\n", onePortContext, oneExpander, pdmSMPDiscoverResp)); DM_DBG3(("dmConfigureRouteTimerCB: discovery %p\n", discovery)); DM_DBG3(("dmConfigureRouteTimerCB: pid %d configureRouteRetries %d\n", onePortContext->id, discovery->configureRouteRetries)); DM_DBG3(("dmConfigureRouteTimerCB: discovery.status %d\n", discovery->status)); discovery->configureRouteRetries++; if (discovery->configureRouteRetries >= dmAllShared->MaxRetryDiscovery) { DM_DBG3(("dmConfigureRouteTimerCB: retries are over\n")); tddmSingleThreadedEnter(dmRoot, DM_TIMER_LOCK); if (discovery->configureRouteTimer.timerRunning == agTRUE) { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); dmKillTimer( dmRoot, &discovery->configureRouteTimer ); } else { tddmSingleThreadedLeave(dmRoot, DM_TIMER_LOCK); } discovery->configureRouteRetries = 0; /* failed the discovery */ dmDiscoverDone(dmRoot, onePortContext, DM_RC_FAILURE); return; } if (oneExpander->SAS2 == 0) { if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG3(("dmConfigureRouteTimerCB: proceed by calling dmDownStreamDiscoverExpanderPhy\n")); dmhexdump("dmConfigureRouteTimerCB", (bit8*)pdmSMPDiscoverResp, sizeof(smpRespDiscover_t)); discovery->configureRouteRetries = 0; dmDownStreamDiscoverExpanderPhy(dmRoot, onePortContext, oneExpander, pdmSMPDiscoverResp); } else { DM_DBG3(("dmConfigureRouteTimerCB: setting timer again\n")); /* set the timer again */ dmSetTimerRequest( dmRoot, &discovery->configureRouteTimer, 
CONFIGURE_ROUTE_TIMER_VALUE/dmAllShared->usecsPerTick, dmConfigureRouteTimerCB, (void *)onePortContext, (void *)oneExpander, (void *)pdmSMPDiscoverResp ); dmAddTimer ( dmRoot, &dmAllShared->timerlist, &discovery->configureRouteTimer ); } } /* SAS 1.1 */ else { /* SAS 2 */ if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { DM_DBG2(("dmConfigureRouteTimerCB: proceed by calling dmDownStreamDiscover2ExpanderPhy\n")); dmhexdump("dmConfigureRouteTimerCB", (bit8*)pdmSMPDiscover2Resp, sizeof(smpRespDiscover2_t)); dmDownStreamDiscover2ExpanderPhy(dmRoot, onePortContext, oneExpander, pdmSMPDiscover2Resp); } else { DM_DBG2(("dmConfigureRouteTimerCB: setting timer again\n")); /* set the timer again */ dmSetTimerRequest( dmRoot, &discovery->configureRouteTimer, CONFIGURE_ROUTE_TIMER_VALUE/dmAllShared->usecsPerTick, dmConfigureRouteTimerCB, (void *)onePortContext, (void *)oneExpander, (void *)pdmSMPDiscover2Resp ); dmAddTimer ( dmRoot, &dmAllShared->timerlist, &discovery->configureRouteTimer ); } } return; } #endif /* FDS_ DM */ Index: head/sys/dev/pms/RefTisa/sallsdk/spc/sainit.c =================================================================== --- head/sys/dev/pms/RefTisa/sallsdk/spc/sainit.c (revision 359440) +++ head/sys/dev/pms/RefTisa/sallsdk/spc/sainit.c (revision 359441) @@ -1,4664 +1,4664 @@ /******************************************************************************* *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. 
*
*THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
*WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
*FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
*FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
*NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
*LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE

********************************************************************************/
/*******************************************************************************/
/*! \file sainit.c
 *  \brief The file implements the functions to initialize the LL layer
 *
 */
/******************************************************************************/
#include 
__FBSDID("$FreeBSD$");
#include 
#include 

#ifdef SA_ENABLE_TRACE_FUNCTIONS
#ifdef siTraceFileID
#undef siTraceFileID
#endif
#define siTraceFileID 'F'
#endif

/* global LL-layer debug verbosity (0..0xF) */
bit32 gLLDebugLevel = 3;

#if defined(SALLSDK_DEBUG)
bit32 gLLDebugLevelSet = 0; // block reinitialize from updating
bit32 gLLLogFuncDebugLevel = 0;
bit32 gLLSoftResetCounter = 0;
#endif

bit32 gPollForMissingInt;

#ifdef FW_EVT_LOG_TST
void  *eventLogAddress = NULL;
#endif

extern bit32 gWait_3;
extern bit32 gWait_2;

bit32 gFPGA_TEST = 0; // If set unblock fpga functions

/******************************************************************************/
/*! \brief Get the memory and lock requirement from LL layer
 *
 *  Get the memory and lock requirement from LL layer
 *
 *  \param agRoot             Handles for this instance of SAS/SATA hardware
 *  \param swConfig           Pointer to the software configuration
 *  \param memoryRequirement  Point to the data structure that holds the different
 *                            chunks of memory that are required
 *  \param usecsPerTick       micro-seconds per tick for the LL layer
 *  \param maxNumLocks        maximum number of locks for the LL layer
 *
 *  \return -void-
 *
 */
/*******************************************************************************/
GLOBAL void saGetRequirements(
  agsaRoot_t              *agRoot,
  agsaSwConfig_t          *swConfig,
  agsaMemoryRequirement_t *memoryRequirement,
  bit32                   *usecsPerTick,
  bit32                   *maxNumLocks
  )
{
  bit32               memoryReqCount = 0;
  bit32               i;
  /* static: large MPI scratch structures kept off the stack */
  static mpiConfig_t  mpiConfig;
  static mpiMemReq_t  mpiMemoryRequirement;

  /* sanity check */
  SA_ASSERT((agNULL != swConfig), "");
  SA_ASSERT((agNULL != memoryRequirement), "");
  SA_ASSERT((agNULL != usecsPerTick), "");
  SA_ASSERT((agNULL != maxNumLocks), "");

  si_memset(&mpiMemoryRequirement, 0, sizeof(mpiMemReq_t));
  si_memset(&mpiConfig, 0, sizeof(mpiConfig_t));

  SA_DBG1(("saGetRequirements:agRoot %p swConfig %p memoryRequirement %p usecsPerTick %p maxNumLocks %p\n",agRoot, swConfig,memoryRequirement,usecsPerTick,maxNumLocks));
  SA_DBG1(("saGetRequirements: usecsPerTick 0x%x (%d)\n",*usecsPerTick,*usecsPerTick));

  /* Get Resource Requirements for SPC MPI */
  /* Set the default/specified requirements swConfig from TD layer */
  siConfiguration(agRoot, &mpiConfig, agNULL, swConfig);
  mpiRequirementsGet(&mpiConfig, &mpiMemoryRequirement);

  /* memory requirement for saRoot, CACHE memory */
  memoryRequirement->agMemory[LLROOT_MEM_INDEX].singleElementLength = sizeof(agsaLLRoot_t);
  memoryRequirement->agMemory[LLROOT_MEM_INDEX].numElements = 1;
  memoryRequirement->agMemory[LLROOT_MEM_INDEX].totalLength = sizeof(agsaLLRoot_t);
  memoryRequirement->agMemory[LLROOT_MEM_INDEX].alignment = sizeof(void *);
  memoryRequirement->agMemory[LLROOT_MEM_INDEX].type = AGSA_CACHED_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[LLROOT_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[LLROOT_MEM_INDEX].singleElementLength,
    memoryRequirement->agMemory[LLROOT_MEM_INDEX].totalLength,
    memoryRequirement->agMemory[LLROOT_MEM_INDEX].alignment,
    memoryRequirement->agMemory[LLROOT_MEM_INDEX].type ));

  /* memory requirement for Device Links, CACHE memory */
  memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].singleElementLength = sizeof(agsaDeviceDesc_t);
  memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].numElements = swConfig->numDevHandles;
  memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].totalLength = sizeof(agsaDeviceDesc_t) * swConfig->numDevHandles;
  memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].alignment = sizeof(void *);
  memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].type = AGSA_CACHED_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[DEVICELINK_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].singleElementLength,
    memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].totalLength,
    memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].alignment,
    memoryRequirement->agMemory[DEVICELINK_MEM_INDEX].type ));

  /* memory requirement for IORequest Links, CACHE memory */
  memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].singleElementLength = sizeof(agsaIORequestDesc_t);
  /* Add SA_RESERVED_REQUEST_COUNT to guarantee quality of service */
  memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].numElements = swConfig->maxActiveIOs + SA_RESERVED_REQUEST_COUNT;
  memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].totalLength = sizeof(agsaIORequestDesc_t) * memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].numElements;
  memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].alignment = sizeof(void *);
  memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].type = AGSA_CACHED_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[IOREQLINK_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].singleElementLength,
    memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].totalLength,
    memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].alignment,
    memoryRequirement->agMemory[IOREQLINK_MEM_INDEX].type ));

  /* memory requirement for Timer Links, CACHE memory */
  memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].singleElementLength = sizeof(agsaTimerDesc_t);
  memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].numElements = NUM_TIMERS;
  memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].totalLength = sizeof(agsaTimerDesc_t) * NUM_TIMERS;
  memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].alignment = sizeof(void *);
  memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].type = AGSA_CACHED_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[TIMERLINK_MEM_INDEX] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].singleElementLength,
    memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].totalLength,
    memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].alignment,
    memoryRequirement->agMemory[TIMERLINK_MEM_INDEX].type ));

#ifdef SA_ENABLE_TRACE_FUNCTIONS
  /* memory requirement for LL trace memory */
  memoryRequirement->agMemory[LL_FUNCTION_TRACE].singleElementLength = 1;
  memoryRequirement->agMemory[LL_FUNCTION_TRACE].numElements = swConfig->TraceBufferSize;
  memoryRequirement->agMemory[LL_FUNCTION_TRACE].totalLength = swConfig->TraceBufferSize;
  memoryRequirement->agMemory[LL_FUNCTION_TRACE].alignment = sizeof(void *);
  memoryRequirement->agMemory[LL_FUNCTION_TRACE].type = AGSA_CACHED_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[LL_FUNCTION_TRACE] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[LL_FUNCTION_TRACE].singleElementLength,
    memoryRequirement->agMemory[LL_FUNCTION_TRACE].totalLength,
    memoryRequirement->agMemory[LL_FUNCTION_TRACE].alignment,
    memoryRequirement->agMemory[LL_FUNCTION_TRACE].type ));
#endif /* END SA_ENABLE_TRACE_FUNCTIONS */

#ifdef FAST_IO_TEST
  {
  agsaMem_t *agMemory = memoryRequirement->agMemory;
  /* memory requirement for Super IO CACHE memory */
  agMemory[LL_FAST_IO].singleElementLength = sizeof(saFastRequest_t);
  agMemory[LL_FAST_IO].numElements = LL_FAST_IO_SIZE;
  agMemory[LL_FAST_IO].totalLength = LL_FAST_IO_SIZE * agMemory[LL_FAST_IO].singleElementLength;
  agMemory[LL_FAST_IO].alignment = sizeof(void*);
  agMemory[LL_FAST_IO].type = AGSA_CACHED_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[LL_FAST_IO] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[LL_FAST_IO].singleElementLength,
    memoryRequirement->agMemory[LL_FAST_IO].totalLength,
    memoryRequirement->agMemory[LL_FAST_IO].alignment,
    memoryRequirement->agMemory[LL_FAST_IO].type ));
  }
#endif

#ifdef SA_ENABLE_HDA_FUNCTIONS
  {
  agsaMem_t *agMemory = memoryRequirement->agMemory;
  /* memory requirement for HDA FW image */
  agMemory[HDA_DMA_BUFFER].singleElementLength = (1024 * 1024); /* must be greater than size of aap1 fw image */
  agMemory[HDA_DMA_BUFFER].numElements = 1;
  agMemory[HDA_DMA_BUFFER].totalLength = agMemory[HDA_DMA_BUFFER].numElements * agMemory[HDA_DMA_BUFFER].singleElementLength;
  agMemory[HDA_DMA_BUFFER].alignment = 32;
  agMemory[HDA_DMA_BUFFER].type = AGSA_DMA_MEM;
  memoryReqCount ++;

  SA_DBG1(("saGetRequirements: agMemory[HDA_DMA_BUFFER] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
    memoryRequirement->agMemory[HDA_DMA_BUFFER].singleElementLength,
    memoryRequirement->agMemory[HDA_DMA_BUFFER].totalLength,
    memoryRequirement->agMemory[HDA_DMA_BUFFER].alignment,
    memoryRequirement->agMemory[HDA_DMA_BUFFER].type ));
  }
#endif /* SA_ENABLE_HDA_FUNCTIONS */

  /* memory requirement for MPI MSGU layer, DMA memory */
  for ( i = 0; i < mpiMemoryRequirement.count; i ++ )
  {
    memoryRequirement->agMemory[memoryReqCount].singleElementLength = mpiMemoryRequirement.region[i].elementSize;
    memoryRequirement->agMemory[memoryReqCount].numElements = mpiMemoryRequirement.region[i].numElements;
    memoryRequirement->agMemory[memoryReqCount].totalLength = mpiMemoryRequirement.region[i].totalLength;
    memoryRequirement->agMemory[memoryReqCount].alignment = mpiMemoryRequirement.region[i].alignment;
    memoryRequirement->agMemory[memoryReqCount].type = mpiMemoryRequirement.region[i].type;

    SA_DBG1(("saGetRequirements:MPI agMemory[%d] singleElementLength = 0x%x totalLength = 0x%x align = 0x%x type %x\n",
      memoryReqCount,
      memoryRequirement->agMemory[memoryReqCount].singleElementLength,
      memoryRequirement->agMemory[memoryReqCount].totalLength,
      memoryRequirement->agMemory[memoryReqCount].alignment,
      memoryRequirement->agMemory[memoryReqCount].type ));
    memoryReqCount ++;
  }

  /* requirement for locks */
  if (swConfig->param3 == agNULL)
  {
    *maxNumLocks = (LL_IOREQ_IBQ_LOCK + AGSA_MAX_INBOUND_Q );
    SA_DBG1(("saGetRequirements: param3 == agNULL maxNumLocks %d\n", *maxNumLocks ));
  }
  else
  {
    agsaQueueConfig_t *queueConfig;
    queueConfig = (agsaQueueConfig_t *)swConfig->param3;
    *maxNumLocks = (LL_IOREQ_IBQ_LOCK_PARM + queueConfig->numInboundQueues );
    SA_DBG1(("saGetRequirements: maxNumLocks %d\n", *maxNumLocks ));
  }

  /* setup the time tick */
  *usecsPerTick = SA_USECS_PER_TICK;

  SA_ASSERT(memoryReqCount < AGSA_NUM_MEM_CHUNKS, "saGetRequirements: Exceed max number of memory place holder");

  /* set up memory requirement count */
  memoryRequirement->count = memoryReqCount;

  swConfig->legacyInt_X = 1;
  swConfig->max_MSI_InterruptVectors = 32;
  swConfig->max_MSIX_InterruptVectors = 64;//16;

  SA_DBG1(("saGetRequirements: swConfig->stallUsec %d\n",swConfig->stallUsec ));

#ifdef SA_CONFIG_MDFD_REGISTRY
  SA_DBG1(("saGetRequirements: swConfig->disableMDF %d\n",swConfig->disableMDF));
#endif /*SA_CONFIG_MDFD_REGISTRY*/
  /*SA_DBG1(("saGetRequirements: swConfig->enableDIF %d\n",swConfig->enableDIF ));*/
  /*SA_DBG1(("saGetRequirements: swConfig->enableEncryption %d\n",swConfig->enableEncryption ));*/
#ifdef SA_ENABLE_HDA_FUNCTIONS
  swConfig->hostDirectAccessSupport = 1;
  swConfig->hostDirectAccessMode = 0;
#else
  swConfig->hostDirectAccessSupport = 0;
  swConfig->hostDirectAccessMode = 0;
#endif
}

/******************************************************************************/
/*! \brief Initialize the Hardware
 *
 *  Initialize the Hardware
 *
 *  \param agRoot           Handles for this instance of SAS/SATA hardware
 *  \param memoryAllocated  Point to the data structure that holds the different chunks of memory that are required
 *  \param hwConfig         Pointer to the hardware configuration
 *  \param swConfig         Pointer to the software configuration
 *  \param usecsPerTick     micro-seconds per tick for the LL layer
 *
 *  \return If initialization is successful
 *          - \e AGSA_RC_SUCCESS initialization is successful
 *          - \e AGSA_RC_FAILURE initialization is not successful
 */
/*******************************************************************************/
GLOBAL bit32 saInitialize(
  agsaRoot_t              *agRoot,
  agsaMemoryRequirement_t *memoryAllocated,
  agsaHwConfig_t          *hwConfig,
  agsaSwConfig_t          *swConfig,
  bit32                   usecsPerTick
  )
{
  agsaLLRoot_t         *saRoot;
  agsaDeviceDesc_t     *pDeviceDesc;
  agsaIORequestDesc_t  *pRequestDesc;
  agsaTimerDesc_t      *pTimerDesc;
  agsaPort_t           *pPort;
  agsaPortMap_t        *pPortMap;
  agsaDeviceMap_t      *pDeviceMap;
  agsaIOMap_t          *pIOMap;
  bit32                maxNumIODevices;
  bit32                i, j;
  static mpiMemReq_t   mpiMemoryAllocated;
  bit32                Tried_NO_HDA = agFALSE;
  bit32                Double_Reset_HDA = agFALSE;
  bit32                ret = AGSA_RC_SUCCESS;
#ifdef FAST_IO_TEST
  void   *fr; /* saFastRequest_t */
  bit32  size;
  bit32  alignment;
#endif
  /* sanity check */
  SA_ASSERT((agNULL != agRoot), "");
  SA_ASSERT((agNULL != memoryAllocated), "");
  SA_ASSERT((agNULL != hwConfig), "");
  SA_ASSERT((agNULL != swConfig), "");
  SA_ASSERT((LLROOT_MEM_INDEX < memoryAllocated->count), "");
  SA_ASSERT((DEVICELINK_MEM_INDEX < memoryAllocated->count),
""); SA_ASSERT((IOREQLINK_MEM_INDEX < memoryAllocated->count), ""); SA_ASSERT((TIMERLINK_MEM_INDEX < memoryAllocated->count), ""); si_memset(&mpiMemoryAllocated, 0, sizeof(mpiMemReq_t)); si_macro_check(agRoot); SA_DBG1(("saInitialize: WAIT_INCREMENT %d\n", WAIT_INCREMENT )); SA_DBG1(("saInitialize: usecsPerTick %d\n", usecsPerTick )); if(! smIS_SPC(agRoot)) { if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: ossaHwRegReadConfig32 ID reads as %08X\n", ossaHwRegReadConfig32(agRoot,0 ) )); SA_DBG1(("saInitialize: expect %08X or %08X or\n", VEN_DEV_SPCV, VEN_DEV_SPCVE)); SA_DBG1(("saInitialize: expect %08X or %08X or\n", VEN_DEV_SPCVP, VEN_DEV_SPCVEP)); SA_DBG1(("saInitialize: expect %08X or %08X\n", VEN_DEV_ADAPVEP, VEN_DEV_ADAPVP)); return AGSA_RC_FAILURE; } } if( smIS_SPC(agRoot) && smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: Macro error !smIS_SPC %d smIS_SPCv %d smIS_SFC %d\n",smIS_SPC(agRoot),smIS_SPCV(agRoot), smIS_SFC(agRoot) )); return AGSA_RC_FAILURE; } /* Check the memory allocated */ for ( i = 0; i < memoryAllocated->count; i ++ ) { /* If memory allocation failed */ if (memoryAllocated->agMemory[i].singleElementLength && memoryAllocated->agMemory[i].numElements) { if ( (0 != memoryAllocated->agMemory[i].numElements) && (0 == memoryAllocated->agMemory[i].totalLength) ) { /* return failure */ SA_DBG1(("saInitialize:AGSA_RC_FAILURE Memory[%d] singleElementLength = 0x%x numElements = 0x%x NOT allocated\n", i, memoryAllocated->agMemory[i].singleElementLength, memoryAllocated->agMemory[i].numElements)); ret = AGSA_RC_FAILURE; return ret; } else { SA_DBG1(("saInitialize: Memory[%d] singleElementLength = 0x%x numElements = 0x%x allocated %p\n", i, memoryAllocated->agMemory[i].singleElementLength, memoryAllocated->agMemory[i].numElements, memoryAllocated->agMemory[i].virtPtr)); } } } /* Get the saRoot memory address */ saRoot = (agsaLLRoot_t *) (memoryAllocated->agMemory[LLROOT_MEM_INDEX].virtPtr); SA_ASSERT((agNULL != saRoot), "saRoot"); if(agNULL == saRoot) { 
SA_DBG1(("saInitialize:AGSA_RC_FAILURE saRoot\n")); return AGSA_RC_FAILURE; } agRoot->sdkData = (void *) saRoot; SA_DBG1(("saInitialize: saRoot %p\n",saRoot)); if ( (memoryAllocated != &saRoot->memoryAllocated) || (hwConfig != &saRoot->hwConfig) || (swConfig != &saRoot->swConfig) ) { agsaMemoryRequirement_t *memA = &saRoot->memoryAllocated; agsaHwConfig_t *hwC = &saRoot->hwConfig; agsaSwConfig_t *swC = &saRoot->swConfig; /* Copy data here */ *memA = *memoryAllocated; *hwC = *hwConfig; *swC = *swConfig; } #if defined(SALLSDK_DEBUG) if(gLLDebugLevelSet == 0) { gLLDebugLevelSet = 1; gLLDebugLevel = swConfig->sallDebugLevel & 0xF; SA_DBG1(("saInitialize: gLLDebugLevel %x\n",gLLDebugLevel)); } #endif /* SALLSDK_DEBUG */ #ifdef SA_ENABLE_TRACE_FUNCTIONS saRoot->TraceBufferLength = memoryAllocated->agMemory[LL_FUNCTION_TRACE].totalLength; saRoot->TraceBuffer = memoryAllocated->agMemory[LL_FUNCTION_TRACE].virtPtr; siEnableTracing ( agRoot ); /* */ #endif /* SA_ENABLE_TRACE_FUNCTIONS */ #ifdef FAST_IO_TEST { agsaMem_t *agMemory = memoryAllocated->agMemory; /* memory requirement for Super IO CACHE memory */ size = sizeof(saRoot->freeFastReq) / sizeof(saRoot->freeFastReq[0]); SA_ASSERT(size == agMemory[LL_FAST_IO].numElements, ""); SA_ASSERT(agMemory[LL_FAST_IO].virtPtr, ""); SA_ASSERT((agMemory[LL_FAST_IO].singleElementLength == sizeof(saFastRequest_t)) && (agMemory[LL_FAST_IO].numElements == LL_FAST_IO_SIZE) && (agMemory[LL_FAST_IO].totalLength == agMemory[LL_FAST_IO].numElements * agMemory[LL_FAST_IO].singleElementLength), ""); for (i = 0, alignment = agMemory[LL_FAST_IO].alignment, fr = agMemory[LL_FAST_IO].virtPtr; i < size; i++, fr = (void*)((bitptr)fr + (bitptr)(((bit32)sizeof(saFastRequest_t) + alignment - 1) & ~(alignment - 1)))) { saRoot->freeFastReq[i] = fr; } saRoot->freeFastIdx = size; } #endif /* FAST_IO_TEST*/ smTraceFuncEnter(hpDBG_VERY_LOUD, "m1"); SA_DBG1(("saInitialize: swConfig->PortRecoveryResetTimer %x\n",swConfig->PortRecoveryResetTimer )); 
SA_DBG1(("saInitialize: hwDEVICE_ID_VENDID 0x%08x\n", ossaHwRegReadConfig32(agRoot,0))); SA_DBG1(("saInitialize: CFGSTAT CFGCMD 0x%08x\n", ossaHwRegReadConfig32(agRoot,4))); SA_DBG1(("saInitialize: CLSCODE REVID 0x%08x\n", ossaHwRegReadConfig32(agRoot,8))); SA_DBG1(("saInitialize: BIST DT HDRTYPE LATTIM CLSIZE 0x%08x\n", ossaHwRegReadConfig32(agRoot,12))); SA_DBG1(("saInitialize: hwSVID 0x%08x\n", ossaHwRegReadConfig32(agRoot,44))); #ifdef SA_ENABLE_PCI_TRIGGER SA_DBG1(("saInitialize: SA_ENABLE_PCI_TRIGGER a 0x%08x %p\n", saRoot->swConfig.PCI_trigger,&saRoot->swConfig.PCI_trigger)); if( saRoot->swConfig.PCI_trigger & PCI_TRIGGER_INIT_TEST ) { SA_DBG1(("saInitialize: SA_ENABLE_PCI_TRIGGER 0x%08x %p\n", saRoot->swConfig.PCI_trigger,&saRoot->swConfig.PCI_trigger)); saRoot->swConfig.PCI_trigger &= ~PCI_TRIGGER_INIT_TEST; siPCITriger(agRoot); } #endif /* SA_ENABLE_PCI_TRIGGER */ saRoot->ChipId = (ossaHwRegReadConfig32(agRoot,0) & 0xFFFF0000); SA_DBG1(("saInitialize: saRoot->ChipId 0x%08x\n", saRoot->ChipId)); siUpdateBarOffsetTable(agRoot,saRoot->ChipId); if(saRoot->ChipId == VEN_DEV_SPC) { if(! smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: smIS_SPC macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m1"); return AGSA_RC_FAILURE; } SA_DBG1(("saInitialize: SPC \n" )); } else if(saRoot->ChipId == VEN_DEV_HIL ) { SA_DBG1(("saInitialize: SPC HIL\n" )); if(! smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: smIS_SPC macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCV) { SA_DBG1(("saInitialize: SPC V\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCVE) { SA_DBG1(("saInitialize: SPC VE\n" )); if(! 
smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCVP) { SA_DBG1(("saInitialize: SPC VP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPCVEP) { SA_DBG1(("saInitialize: SPC VEP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_ADAPVP) { SA_DBG1(("saInitialize: Adaptec 8088\n" )); } else if(saRoot->ChipId == VEN_DEV_ADAPVEP) { SA_DBG1(("saInitialize: Adaptec 8089\n" )); } else if(saRoot->ChipId == VEN_DEV_SPC12V) { SA_DBG1(("saInitialize: SPC 12V\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12VE) { SA_DBG1(("saInitialize: SPC 12VE\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12VP) { SA_DBG1(("saInitialize: SPC 12VP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12VEP) { SA_DBG1(("saInitialize: SPC 12VEP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADP) { SA_DBG1(("saInitialize: SPC 12ADP\n" )); if(! 
smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'k', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADPE) { SA_DBG1(("saInitialize: SPC 12ADPE\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'l', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADPP) { SA_DBG1(("saInitialize: SPC 12ADPP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'm', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12ADPEP) { SA_DBG1(("saInitialize: SPC 12ADPEP\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'n', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SPC12SATA) { SA_DBG1(("saInitialize: SPC12SATA\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'o', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_9015) { SA_DBG1(("saInitialize: SPC 12V FPGA\n" )); if(! smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'p', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_9060) { SA_DBG1(("saInitialize: SPC 12V FPGA B\n" )); if(! 
smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: smIS_SPCV macro fail !!!!\n" )); smTraceFuncExit(hpDBG_VERY_LOUD, 'q', "m1"); return AGSA_RC_FAILURE; } } else if(saRoot->ChipId == VEN_DEV_SFC) { SA_DBG1(("saInitialize: SFC \n" )); } else { SA_DBG1(("saInitialize saRoot->ChipId %8X expect %8X or %8X\n", saRoot->ChipId,VEN_DEV_SPC, VEN_DEV_SPCV)); SA_ASSERT(0, "ChipId"); smTraceFuncExit(hpDBG_VERY_LOUD, 'r', "m1"); return AGSA_RC_FAILURE; } if( smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: Rev is A %d B %d C %d\n",smIsCfgSpcREV_A(agRoot),smIsCfgSpcREV_B(agRoot),smIsCfgSpcREV_C(agRoot))); } else { SA_DBG1(("saInitialize: Rev is A %d B %d C %d\n",smIsCfgVREV_A(agRoot),smIsCfgVREV_B(agRoot),smIsCfgVREV_C(agRoot))); } if( smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: LINK_CTRL 0x%08x Speed 0x%X Lanes 0x%X \n", ossaHwRegReadConfig32(agRoot,128), ((ossaHwRegReadConfig32(agRoot,128) & 0x000F0000) >> 16), ((ossaHwRegReadConfig32(agRoot,128) & 0x0FF00000) >> 20) )); } else { SA_DBG1(("saInitialize: LINK_CTRL 0x%08x Speed 0x%X Lanes 0x%X \n", ossaHwRegReadConfig32(agRoot,208), ((ossaHwRegReadConfig32(agRoot,208) & 0x000F0000) >> 16), ((ossaHwRegReadConfig32(agRoot,208) & 0x0FF00000) >> 20) )); } SA_DBG1(("saInitialize: V_SoftResetRegister %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_SoftResetRegister ))); /* SA_DBG1(("saInitialize:TOP_BOOT_STRAP STRAP_BIT %X\n", ossaHwRegReadExt(agRoot, PCIBAR1, 0) )); SA_DBG1(("SPC_REG_TOP_DEVICE_ID %8X expect %08X\n", ossaHwRegReadExt(agRoot, PCIBAR2, SPC_REG_TOP_DEVICE_ID), SPC_TOP_DEVICE_ID)); SA_DBG1(("SPC_REG_TOP_DEVICE_ID %8X expect %08X\n", siHalRegReadExt( agRoot, GEN_SPC_REG_TOP_DEVICE_ID,SPC_REG_TOP_DEVICE_ID ) , SPC_TOP_DEVICE_ID)); SA_DBG1(("SPC_REG_TOP_BOOT_STRAP %8X expect %08X\n", ossaHwRegReadExt(agRoot, PCIBAR2, SPC_REG_TOP_BOOT_STRAP), SPC_TOP_BOOT_STRAP)); SA_DBG1(("swConfig->numSASDevHandles =%d\n", swConfig->numDevHandles)); */ smTrace(hpDBG_VERY_LOUD,"29",swConfig->numDevHandles); /* TP:29 swConfig->numDevHandles */ /* Setup 
Device link */ /* Save the information of allocated device Link memory */ saRoot->deviceLinkMem = memoryAllocated->agMemory[DEVICELINK_MEM_INDEX]; if(agNULL == saRoot->deviceLinkMem.virtPtr) { SA_ASSERT(0, "deviceLinkMem"); smTraceFuncExit(hpDBG_VERY_LOUD, 'q', "m1"); return AGSA_RC_FAILURE; } si_memset(saRoot->deviceLinkMem.virtPtr, 0, saRoot->deviceLinkMem.totalLength); SA_DBG2(("saInitialize: [%d] saRoot->deviceLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", DEVICELINK_MEM_INDEX, saRoot->deviceLinkMem.virtPtr, saRoot->deviceLinkMem.phyAddrLower, saRoot->deviceLinkMem.numElements, saRoot->deviceLinkMem.totalLength, saRoot->deviceLinkMem.type)); maxNumIODevices = swConfig->numDevHandles; SA_DBG2(("saInitialize: maxNumIODevices=%d, swConfig->numDevHandles=%d \n", maxNumIODevices, swConfig->numDevHandles)); #ifdef SA_ENABLE_PCI_TRIGGER SA_DBG1(("saInitialize: swConfig->PCI_trigger= 0x%x\n", swConfig->PCI_trigger)); #endif /* SA_ENABLE_PCI_TRIGGER */ /* Setup free IO Devices link list */ saLlistInitialize(&(saRoot->freeDevicesList)); for ( i = 0; i < (bit32) maxNumIODevices; i ++ ) { /* get the pointer to the device descriptor */ pDeviceDesc = (agsaDeviceDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->deviceLinkMem), i); /* Initialize device descriptor */ saLlinkInitialize(&(pDeviceDesc->linkNode)); pDeviceDesc->initiatorDevHandle.osData = agNULL; pDeviceDesc->initiatorDevHandle.sdkData = agNULL; pDeviceDesc->targetDevHandle.osData = agNULL; pDeviceDesc->targetDevHandle.sdkData = agNULL; pDeviceDesc->deviceType = SAS_SATA_UNKNOWN_DEVICE; pDeviceDesc->pPort = agNULL; pDeviceDesc->DeviceMapIndex = 0; saLlistInitialize(&(pDeviceDesc->pendingIORequests)); /* Add the device descriptor to the free IO device link list */ saLlistAdd(&(saRoot->freeDevicesList), &(pDeviceDesc->linkNode)); } /* Setup IO Request link */ /* Save the information of allocated IO Request Link memory */ saRoot->IORequestMem = memoryAllocated->agMemory[IOREQLINK_MEM_INDEX]; 
si_memset(saRoot->IORequestMem.virtPtr, 0, saRoot->IORequestMem.totalLength); SA_DBG2(("saInitialize: [%d] saRoot->IORequestMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", IOREQLINK_MEM_INDEX, saRoot->IORequestMem.virtPtr, saRoot->IORequestMem.phyAddrLower, saRoot->IORequestMem.numElements, saRoot->IORequestMem.totalLength, saRoot->IORequestMem.type)); /* Setup free IO Request link list */ saLlistIOInitialize(&(saRoot->freeIORequests)); saLlistIOInitialize(&(saRoot->freeReservedRequests)); for ( i = 0; i < swConfig->maxActiveIOs; i ++ ) { /* get the pointer to the request descriptor */ pRequestDesc = (agsaIORequestDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->IORequestMem), i); /* Initialize request descriptor */ saLlinkInitialize(&(pRequestDesc->linkNode)); pRequestDesc->valid = agFALSE; pRequestDesc->requestType = AGSA_REQ_TYPE_UNKNOWN; pRequestDesc->pIORequestContext = agNULL; pRequestDesc->HTag = i; pRequestDesc->pDevice = agNULL; pRequestDesc->pPort = agNULL; /* Add the request descriptor to the free Reserved Request link list */ /* SMP request must get service so reserve one request when first SMP completes */ if(saLlistIOGetCount(&(saRoot->freeReservedRequests)) < SA_RESERVED_REQUEST_COUNT) { saLlistIOAdd(&(saRoot->freeReservedRequests), &(pRequestDesc->linkNode)); } else { /* Add the request descriptor to the free IO Request link list */ saLlistIOAdd(&(saRoot->freeIORequests), &(pRequestDesc->linkNode)); } } /* Setup timer link */ /* Save the information of allocated timer Link memory */ saRoot->timerLinkMem = memoryAllocated->agMemory[TIMERLINK_MEM_INDEX]; si_memset(saRoot->timerLinkMem.virtPtr, 0, saRoot->timerLinkMem.totalLength); SA_DBG2(("saInitialize: [%d] saRoot->timerLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", TIMERLINK_MEM_INDEX, saRoot->timerLinkMem.virtPtr, saRoot->timerLinkMem.phyAddrLower, saRoot->timerLinkMem.numElements, saRoot->timerLinkMem.totalLength, saRoot->timerLinkMem.type )); /* Setup free timer link list */ 
saLlistInitialize(&(saRoot->freeTimers)); for ( i = 0; i < NUM_TIMERS; i ++ ) { /* get the pointer to the timer descriptor */ pTimerDesc = (agsaTimerDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->timerLinkMem), i); /* Initialize timer descriptor */ saLlinkInitialize(&(pTimerDesc->linkNode)); pTimerDesc->valid = agFALSE; pTimerDesc->timeoutTick = 0; pTimerDesc->pfnTimeout = agNULL; pTimerDesc->Event = 0; pTimerDesc->pParm = agNULL; /* Add the timer descriptor to the free timer link list */ saLlistAdd(&(saRoot->freeTimers), &(pTimerDesc->linkNode)); } /* Setup valid timer link list */ saLlistInitialize(&(saRoot->validTimers)); /* Setup Phys */ /* Setup PhyCount */ saRoot->phyCount = (bit8) hwConfig->phyCount; /* Init Phy data structure */ for ( i = 0; i < saRoot->phyCount; i ++ ) { saRoot->phys[i].pPort = agNULL; saRoot->phys[i].phyId = (bit8) i; /* setup phy status is PHY_STOPPED */ PHY_STATUS_SET(&(saRoot->phys[i]), PHY_STOPPED); } /* Setup Ports */ /* Setup PortCount */ saRoot->portCount = saRoot->phyCount; /* Setup free port link list */ saLlistInitialize(&(saRoot->freePorts)); for ( i = 0; i < saRoot->portCount; i ++ ) { /* get the pointer to the port */ pPort = &(saRoot->ports[i]); /* Initialize port */ saLlinkInitialize(&(pPort->linkNode)); pPort->portContext.osData = agNULL; pPort->portContext.sdkData = pPort; pPort->portId = 0; pPort->portIdx = (bit8) i; pPort->status = PORT_NORMAL; for ( j = 0; j < saRoot->phyCount; j ++ ) { pPort->phyMap[j] = agFALSE; } saLlistInitialize(&(pPort->listSASATADevices)); /* Add the port to the free port link list */ saLlistAdd(&(saRoot->freePorts), &(pPort->linkNode)); } /* Setup valid port link list */ saLlistInitialize(&(saRoot->validPorts)); /* Init sysIntsActive - default is interrupt enable */ saRoot->sysIntsActive = agFALSE; /* setup timer tick granunarity */ saRoot->usecsPerTick = usecsPerTick; /* setup smallest timer increment for stall */ saRoot->minStallusecs = swConfig->stallUsec; SA_DBG1(("saInitialize: WAIT_INCREMENT 
%d\n" ,WAIT_INCREMENT )); if (0 == WAIT_INCREMENT) { saRoot->minStallusecs = WAIT_INCREMENT_DEFAULT; } /* initialize LL timer tick */ saRoot->timeTick = 0; /* initialize device (de)registration callback fns */ saRoot->DeviceRegistrationCB = agNULL; saRoot->DeviceDeregistrationCB = agNULL; /* Initialize the PortMap for port context */ for ( i = 0; i < saRoot->portCount; i ++ ) { pPortMap = &(saRoot->PortMap[i]); pPortMap->PortContext = agNULL; pPortMap->PortID = PORT_MARK_OFF; pPortMap->PortStatus = PORT_NORMAL; saRoot->autoDeregDeviceflag[i] = 0; } /* Initialize the DeviceMap for device handle */ for ( i = 0; i < MAX_IO_DEVICE_ENTRIES; i ++ ) { pDeviceMap = &(saRoot->DeviceMap[i]); pDeviceMap->DeviceHandle = agNULL; pDeviceMap->DeviceIdFromFW = i; } /* Initialize the IOMap for IOrequest */ for ( i = 0; i < MAX_ACTIVE_IO_REQUESTS; i ++ ) { pIOMap = &(saRoot->IOMap[i]); pIOMap->IORequest = agNULL; pIOMap->Tag = MARK_OFF; } /* setup mpi configuration */ if (!swConfig->param3) { /* default configuration */ siConfiguration(agRoot, &saRoot->mpiConfig, hwConfig, swConfig); } else { /* get from TD layer and save it */ agsaQueueConfig_t *dCFG = &saRoot->QueueConfig; agsaQueueConfig_t *sCFG = (agsaQueueConfig_t *)swConfig->param3; if (dCFG != sCFG) { *dCFG = *sCFG; if ((hwConfig->hwInterruptCoalescingTimer) || (hwConfig->hwInterruptCoalescingControl)) { for ( i = 0; i < sCFG->numOutboundQueues; i ++ ) { /* disable FW assisted coalescing */ sCFG->outboundQueues[i].interruptDelay = 0; sCFG->outboundQueues[i].interruptCount = 0; } if(smIS_SPC(agRoot)) { if (hwConfig->hwInterruptCoalescingTimer == 0) { hwConfig->hwInterruptCoalescingTimer = 1; SA_DBG1(("saInitialize:InterruptCoalescingTimer should not be zero. 
Force to 1\n")); } } } ret = siConfiguration(agRoot, &saRoot->mpiConfig, hwConfig, swConfig); if (AGSA_RC_FAILURE == ret) { SA_DBG1(("saInitialize failure queue number=%d\n", saRoot->QueueConfig.numInboundQueues)); agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'r', "m1"); return ret; } } } saRoot->swConfig.param3 = &saRoot->QueueConfig; mpiMemoryAllocated.count = memoryAllocated->count - MPI_MEM_INDEX; for ( i = 0; i < mpiMemoryAllocated.count; i ++ ) { mpiMemoryAllocated.region[i].virtPtr = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].virtPtr; mpiMemoryAllocated.region[i].appHandle = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].osHandle; mpiMemoryAllocated.region[i].physAddrUpper = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].phyAddrUpper; mpiMemoryAllocated.region[i].physAddrLower = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].phyAddrLower; mpiMemoryAllocated.region[i].totalLength = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].totalLength; mpiMemoryAllocated.region[i].numElements = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].numElements; mpiMemoryAllocated.region[i].elementSize = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].singleElementLength; mpiMemoryAllocated.region[i].alignment = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].alignment; mpiMemoryAllocated.region[i].type = memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].type; SA_DBG2(("saInitialize: memoryAllocated->agMemory[%d] VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", (MPI_IBQ_OBQ_INDEX + i), memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].virtPtr, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].phyAddrLower, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].numElements, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].totalLength, memoryAllocated->agMemory[MPI_IBQ_OBQ_INDEX + i].type)); /* set to zeros */ SA_DBG1(("saInitialize: Zero memory region %d virt %p allocated %d\n", i,mpiMemoryAllocated.region[i].virtPtr, 
mpiMemoryAllocated.region[i].totalLength)); si_memset(mpiMemoryAllocated.region[i].virtPtr , 0,mpiMemoryAllocated.region[i].totalLength); } if ((!swConfig->max_MSI_InterruptVectors) && (!swConfig->max_MSIX_InterruptVectors) && (!swConfig->legacyInt_X)) { /* polling mode */ SA_DBG1(("saInitialize: configured as polling mode\n")); } else { SA_DBG1(("saInitialize: swConfig->max_MSI_InterruptVectors %d\n",swConfig->max_MSI_InterruptVectors)); SA_DBG1(("saInitialize: swConfig->max_MSIX_InterruptVectors %d\n",swConfig->max_MSIX_InterruptVectors)); if ((swConfig->legacyInt_X > 1) || (swConfig->max_MSI_InterruptVectors > 32) || (swConfig->max_MSIX_InterruptVectors > 64)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors A\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 's', "m1"); return AGSA_RC_FAILURE; } if ((swConfig->legacyInt_X) && (swConfig->max_MSI_InterruptVectors)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors B\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 't', "m1"); return AGSA_RC_FAILURE; } else if ((swConfig->legacyInt_X) && (swConfig->max_MSIX_InterruptVectors)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors C\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'u', "m1"); return AGSA_RC_FAILURE; } else if ((swConfig->max_MSI_InterruptVectors) && (swConfig->max_MSIX_InterruptVectors)) { /* error */ agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE InterruptVectors D\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'v', "m1"); return AGSA_RC_FAILURE; } } /* This section sets common interrupt for Legacy(IRQ) and MSI and MSIX types */ if(smIS_SPC(agRoot)) { SA_DBG1(("saInitialize: SPC interrupts\n" )); if (swConfig->legacyInt_X) { saRoot->OurInterrupt = siOurLegacyInterrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacyInterrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacyInterrupts;/* Called in Delayed 
Int handler*/ } else if (swConfig->max_MSIX_InterruptVectors) { saRoot->OurInterrupt = siOurMSIXInterrupt; saRoot->DisableInterrupts = siDisableMSIXInterrupts; saRoot->ReEnableInterrupts = siReenableMSIXInterrupts; } else if (swConfig->max_MSI_InterruptVectors) { saRoot->OurInterrupt = siOurMSIInterrupt; saRoot->DisableInterrupts = siDisableMSIInterrupts; saRoot->ReEnableInterrupts = siReenableMSIInterrupts; } else { /* polling mode */ saRoot->OurInterrupt = siOurLegacyInterrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacyInterrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacyInterrupts;/* Called in Delayed Int handler*/ } } else { SA_DBG1(("saInitialize: SPC V interrupts\n" )); if (swConfig->legacyInt_X ) { SA_DBG1(("saInitialize: SPC V legacyInt_X\n" )); saRoot->OurInterrupt = siOurLegacy_V_Interrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacy_V_Interrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacy_V_Interrupts;/* Called in Delayed Int handler*/ } else if (swConfig->max_MSIX_InterruptVectors) { SA_DBG1(("saInitialize: SPC V max_MSIX_InterruptVectors %X\n", swConfig->max_MSIX_InterruptVectors)); saRoot->OurInterrupt = siOurMSIX_V_Interrupt; /* */ saRoot->DisableInterrupts = siDisableMSIX_V_Interrupts; saRoot->ReEnableInterrupts = siReenableMSIX_V_Interrupts; } else if (swConfig->max_MSI_InterruptVectors) { SA_DBG1(("saInitialize: SPC V max_MSI_InterruptVectors\n" )); saRoot->OurInterrupt = siOurMSIX_V_Interrupt; /* */ saRoot->DisableInterrupts = siDisableMSIX_V_Interrupts; saRoot->ReEnableInterrupts = siReenableMSIX_V_Interrupts; } else { /* polling mode */ SA_DBG1(("saInitialize: SPC V polling mode\n" )); saRoot->OurInterrupt = siOurLegacy_V_Interrupt; /* Called in ISR*/ saRoot->DisableInterrupts = siDisableLegacy_V_Interrupts; /* Called in ISR*/ saRoot->ReEnableInterrupts = siReenableLegacy_V_Interrupts;/* Called in Delayed Int handler*/ } SA_DBG1(("saInitialize: SPC V\n" )); } 
saRoot->Use64bit = (saRoot->QueueConfig.numOutboundQueues > 32 ) ? 1 : 0; if( smIS64bInt(agRoot)) { SA_DBG1(("saInitialize: Use 64 bits for interrupts %d %d\n" ,saRoot->Use64bit, saRoot->QueueConfig.numOutboundQueues )); } else { SA_DBG1(("saInitialize: Use 32 bits for interrupts %d %d\n",saRoot->Use64bit , saRoot->QueueConfig.numOutboundQueues )); } #ifdef SA_LL_IBQ_PROTECT SA_DBG1(("saInitialize: Inbound locking defined since LL_IOREQ_IBQ0_LOCK %d\n",LL_IOREQ_IBQ0_LOCK)); #endif /* SA_LL_IBQ_PROTECT */ /* Disable interrupt */ saRoot->DisableInterrupts(agRoot, 0); SA_DBG1(("saInitialize: DisableInterrupts sysIntsActive %X\n" ,saRoot->sysIntsActive)); #ifdef SA_FW_TEST_BUNCH_STARTS saRoot->BunchStarts_Enable = FALSE; saRoot->BunchStarts_Threshold = 5; saRoot->BunchStarts_Pending = 0; saRoot->BunchStarts_TimeoutTicks = 10; // N x 100 ms #endif /* SA_FW_TEST_BUNCH_STARTS */ /* clear the interrupt vector bitmap */ for ( i = 0; i < MAX_NUM_VECTOR; i ++ ) { saRoot->interruptVecIndexBitMap[i] = 0; saRoot->interruptVecIndexBitMap1[i] = 0; } #if defined(SALLSDK_DEBUG) smTrace(hpDBG_VERY_LOUD,"2Y",0); /* TP:2Y SCRATCH_PAD */ SA_DBG1(("saInitialize: SCRATCH_PAD0 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_0))); SA_DBG1(("saInitialize: SCRATCH_PAD1 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1))); SA_DBG1(("saInitialize: SCRATCH_PAD2 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_2))); SA_DBG1(("saInitialize: SCRATCH_PAD3 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3))); #endif /* SALLSDK_DEBUG */ if(smIS_SPCV(agRoot)) { bit32 ScratchPad1 =0; bit32 ScratchPad3 =0; ScratchPad1 = ossaHwRegRead(agRoot,V_Scratchpad_1_Register); ScratchPad3 = ossaHwRegRead(agRoot,V_Scratchpad_3_Register); if((ScratchPad1 & SCRATCH_PAD1_V_RAAE_MASK) == SCRATCH_PAD1_V_RAAE_MASK) { if(((ScratchPad3 & SCRATCH_PAD3_V_ENC_MASK ) == SCRATCH_PAD3_V_ENC_DIS_ERR ) || ((ScratchPad3 & SCRATCH_PAD3_V_ENC_MASK ) == 
SCRATCH_PAD3_V_ENC_ENA_ERR ) ) { SA_DBG1(("saInitialize:Warning Encryption Issue SCRATCH_PAD3 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3))); } } } if( smIS_SPC(agRoot)) { #ifdef SA_ENABLE_HDA_FUNCTIONS TryWithHDA_ON: Double_Reset_HDA = TRUE; if (swConfig->hostDirectAccessSupport) { if (AGSA_RC_FAILURE == siHDAMode(agRoot, swConfig->hostDirectAccessMode, (agsaFwImg_t *)swConfig->param4)) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE siHDAMode\n")); agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'w', "m1"); return AGSA_RC_FAILURE; } else { SA_DBG1(("saInitialize:1 Going to HDA mode HDA 0x%X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); if(Double_Reset_HDA == agFALSE) { siSpcSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); SA_DBG1(("saInitialize: Double_Reset_HDA HDA 0x%X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); Double_Reset_HDA = TRUE; goto TryWithHDA_ON; } } } else { /* check FW is running */ if (BOOTTLOADERHDA_IDLE == (ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET) & HDA_STATUS_BITS)) { /* HDA mode */ SA_DBG1(("saInitialize: No HDA mode enable and FW is not running.\n")); if(Tried_NO_HDA != agTRUE ) { Tried_NO_HDA = TRUE; swConfig->hostDirectAccessSupport = 1; swConfig->hostDirectAccessMode = 1; siSpcSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); SA_DBG1(("saInitialize: 2 Going to HDA mode HDA %X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); goto TryWithHDA_ON; } else { SA_DBG1(("saInitialize: could not start HDA mode HDA %X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); smTraceFuncExit(hpDBG_VERY_LOUD, 'x', "m1"); return AGSA_RC_FAILURE; } smTraceFuncExit(hpDBG_VERY_LOUD, 'y', "m1"); return AGSA_RC_FAILURE; } } #else /* SA_ENABLE_HDA_FUNCTIONS */ /* check FW is running */ if (BOOTTLOADERHDA_IDLE == (ossaHwRegReadExt(agRoot, PCIBAR3, 
HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET) & HDA_STATUS_BITS) ) { /* HDA mode */ SA_DBG1(("saInitialize: No HDA mode enable and FW is not running.\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'z', "m1"); return AGSA_RC_FAILURE; } #endif /* SA_ENABLE_HDA_FUNCTIONS */ } else { SA_DBG1(("saInitialize: SPCv swConfig->hostDirectAccessMode %d swConfig->hostDirectAccessSupport %d\n",swConfig->hostDirectAccessMode,swConfig->hostDirectAccessSupport)); if (swConfig->hostDirectAccessSupport) { bit32 hda_status; bit32 soft_reset_status = AGSA_RC_SUCCESS; SA_DBG1(("saInitialize: SPCv load HDA\n")); hda_status = (ossaHwRegReadExt(agRoot, PCIBAR0, SPC_V_HDA_RESPONSE_OFFSET+28)); SA_DBG1(("saInitialize: hda_status 0x%x\n",hda_status)); siScratchDump(agRoot); if( swConfig->hostDirectAccessMode == 0) { soft_reset_status = siSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); if(soft_reset_status != AGSA_RC_SUCCESS) { agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE soft_reset_status\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'A', "m1"); return AGSA_RC_FAILURE; } } if((hda_status & SPC_V_HDAR_RSPCODE_MASK) != SPC_V_HDAR_IDLE) { SA_DBG1(("saInitialize: hda_status not SPC_V_HDAR_IDLE 0x%08x\n", hda_status)); soft_reset_status = siSoftReset(agRoot, SPC_HDASOFT_RESET_SIGNATURE); hda_status = (ossaHwRegReadExt(agRoot, PCIBAR0, SPC_V_HDA_RESPONSE_OFFSET+28)); if((hda_status & SPC_V_HDAR_RSPCODE_MASK) != SPC_V_HDAR_IDLE) { SA_DBG1(("saInitialize: 2 reset hda_status not SPC_V_HDAR_IDLE 0x%08x\n", hda_status)); } } if(soft_reset_status != AGSA_RC_SUCCESS) { agRoot->sdkData = agNULL; SA_DBG1(("saInitialize:AGSA_RC_FAILURE soft_reset_status A\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'B', "m1"); return AGSA_RC_FAILURE; } #ifdef SA_ENABLE_HDA_FUNCTIONS if (AGSA_RC_FAILURE == siHDAMode_V(agRoot, swConfig->hostDirectAccessMode, (agsaFwImg_t *)swConfig->param4)) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE siHDAMode_V\n")); siChipResetV(agRoot, SPC_HDASOFT_RESET_SIGNATURE); agRoot->sdkData = agNULL; 
smTraceFuncExit(hpDBG_VERY_LOUD, 'C', "m1"); return AGSA_RC_FAILURE; } #endif /* SA_ENABLE_HDA_FUNCTIONS */ } else { SA_DBG1(("saInitialize: SPCv normal\n")); } } /* copy the table to the LL layer */ si_memcpy(&saRoot->mpiConfig.phyAnalogConfig, &hwConfig->phyAnalogConfig, sizeof(agsaPhyAnalogSetupTable_t)); #ifdef SALL_API_TEST /* Initialize the LL IO counter */ si_memset(&saRoot->LLCounters, 0, sizeof(agsaIOCountInfo_t)); #endif si_memset(&saRoot->IoErrorCount, 0, sizeof(agsaIOErrorEventStats_t)); si_memset(&saRoot->IoEventCount, 0, sizeof(agsaIOErrorEventStats_t)); if(smIS_SPC(agRoot)) { if( smIS_spc8081(agRoot)) { if (AGSA_RC_FAILURE == siBar4Shift(agRoot, MBIC_GSM_SM_BASE)) { SA_DBG1(("saInitialize: siBar4Shift FAILED ******************************************\n")); } } siSpcSoftReset(agRoot, SPC_SOFT_RESET_SIGNATURE); } if(smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: saRoot->ChipId == VEN_DEV_SPCV\n")); siChipResetV(agRoot, SPC_SOFT_RESET_SIGNATURE); } /* MPI Initialization */ ret = mpiInitialize(agRoot, &mpiMemoryAllocated, &saRoot->mpiConfig); SA_DBG1(("saInitialize: MaxOutstandingIO 0x%x swConfig->maxActiveIOs 0x%x\n", saRoot->ControllerInfo.maxPendingIO,saRoot->swConfig.maxActiveIOs )); #ifdef SA_ENABLE_HDA_FUNCTIONS if( ret == AGSA_RC_FAILURE && Tried_NO_HDA == agFALSE && smIS_SPC(agRoot)) { /* FW not flashed */ Tried_NO_HDA=agTRUE; swConfig->hostDirectAccessSupport = 1; swConfig->hostDirectAccessMode = 1; siSoftReset(agRoot, SPC_SOFT_RESET_SIGNATURE); SA_DBG1(("saInitialize: 3 Going to HDA mode HDA %X \n",ossaHwRegReadExt(agRoot, PCIBAR3, HDA_RSP_OFFSET1MB+HDA_CMD_CODE_OFFSET))); goto TryWithHDA_ON; } #endif /* SA_ENABLE_HDA_FUNCTIONS */ if( ret == AGSA_RC_FAILURE) { SA_DBG1(("saInitialize: AGSA_RC_FAILURE mpiInitialize\n")); SA_DBG1(("saInitialize: SCRATCH_PAD0 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_0_Register))); SA_DBG1(("saInitialize: SCRATCH_PAD1 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_1_Register))); SA_DBG1(("saInitialize: 
SCRATCH_PAD2 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_2_Register))); SA_DBG1(("saInitialize: SCRATCH_PAD3 value = 0x%x\n", ossaHwRegRead(agRoot, V_Scratchpad_3_Register))); if(saRoot->swConfig.fatalErrorInterruptEnable) { ossaDisableInterrupts(agRoot,saRoot->swConfig.fatalErrorInterruptVector ); } agRoot->sdkData = agNULL; smTraceFuncExit(hpDBG_VERY_LOUD, 'D', "m1"); return ret; } /* setup hardware interrupt coalescing control and timer registers */ if(smIS_SPCV(agRoot)) { SA_DBG1(("saInitialize: SPC_V Not set hwInterruptCoalescingTimer\n" )); SA_DBG1(("saInitialize: SPC_V Not set hwInterruptCoalescingControl\n" )); } else { ossaHwRegWriteExt(agRoot, PCIBAR1, SPC_ICTIMER,hwConfig->hwInterruptCoalescingTimer ); ossaHwRegWriteExt(agRoot, PCIBAR1, SPC_ICCONTROL, hwConfig->hwInterruptCoalescingControl); } SA_DBG1(("saInitialize: swConfig->fatalErrorInterruptEnable %X\n",swConfig->fatalErrorInterruptEnable)); SA_DBG1(("saInitialize: saRoot->swConfig.fatalErrorInterruptVector %X\n",saRoot->swConfig.fatalErrorInterruptVector)); SA_DBG1(("saInitialize: swConfig->max_MSI_InterruptVectors %X\n",swConfig->max_MSI_InterruptVectors)); SA_DBG1(("saInitialize: swConfig->max_MSIX_InterruptVectors %X\n",swConfig->max_MSIX_InterruptVectors)); SA_DBG1(("saInitialize: swConfig->legacyInt_X %X\n",swConfig->legacyInt_X)); SA_DBG1(("saInitialize: swConfig->hostDirectAccessSupport %X\n",swConfig->hostDirectAccessSupport)); SA_DBG1(("saInitialize: swConfig->hostDirectAccessMode %X\n",swConfig->hostDirectAccessMode)); #ifdef SA_CONFIG_MDFD_REGISTRY SA_DBG1(("saInitialize: swConfig->disableMDF %X\n",swConfig->disableMDF)); #endif /*SA_CONFIG_MDFD_REGISTRY*/ /*SA_DBG1(("saInitialize: swConfig->enableDIF %X\n",swConfig->enableDIF));*/ /*SA_DBG1(("saInitialize: swConfig->enableEncryption %X\n",swConfig->enableEncryption));*/ /* log message if failure */ if (AGSA_RC_FAILURE == ret) { SA_DBG1(("saInitialize:AGSA_RC_FAILURE mpiInitialize\n")); /* Assign chip status */ saRoot->chipStatus 
= CHIP_FATAL_ERROR; } else { /* Assign chip status */ saRoot->chipStatus = CHIP_NORMAL; #ifdef SA_FW_TIMER_READS_STATUS siTimerAdd(agRoot,SA_FW_TIMER_READS_STATUS_INTERVAL, siReadControllerStatus,0,agNULL ); #endif /* SA_FW_TIMER_READS_STATUS */ } if( ret == AGSA_RC_SUCCESS || ret == AGSA_RC_VERSION_UNTESTED) { if(gPollForMissingInt) { mpiOCQueue_t *circularQ; SA_DBG1(("saInitialize: saRoot->sysIntsActive %X\n",saRoot->sysIntsActive)); circularQ = &saRoot->outboundQueue[0]; OSSA_READ_LE_32(circularQ->agRoot, &circularQ->producerIdx, circularQ->piPointer, 0); SA_DBG1(("saInitialize: PI 0x%03x CI 0x%03x\n",circularQ->producerIdx, circularQ->consumerIdx)); } } /* If fatal error interrupt enable we need checking it during the interrupt */ SA_DBG1(("saInitialize: swConfig.fatalErrorInterruptEnable %d\n",saRoot->swConfig.fatalErrorInterruptEnable)); SA_DBG1(("saInitialize: swConfig.fatalErrorInterruptVector %d\n",saRoot->swConfig.fatalErrorInterruptVector)); SA_DBG1(("saInitialize: swConfig->max_MSIX_InterruptVectors %X\n",swConfig->max_MSIX_InterruptVectors)); if(saRoot->swConfig.fatalErrorInterruptEnable) { SA_DBG1(("saInitialize: Doorbell_Set %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_Register), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_RegisterU))); SA_DBG1(("saInitialize: Doorbell_Mask %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_Register ), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_RegisterU ))); ossaReenableInterrupts(agRoot,saRoot->swConfig.fatalErrorInterruptVector ); SA_DBG1(("saInitialize: Doorbell_Set %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_Register), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Set_RegisterU))); SA_DBG1(("saInitialize: Doorbell_Mask %08X U %08X\n", ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_Register ), ossaHwRegReadExt(agRoot, PCIBAR0, V_Outbound_Doorbell_Mask_Set_RegisterU ))); 
} SA_DBG1(("saInitialize: siDumpActiveIORequests\n")); siDumpActiveIORequests(agRoot, saRoot->swConfig.maxActiveIOs); smTraceFuncExit(hpDBG_VERY_LOUD, 'E', "m1"); /* return */ return ret; } #ifdef SA_FW_TIMER_READS_STATUS bit32 siReadControllerStatus( agsaRoot_t *agRoot, bit32 Event, void * pParm ) { bit32 to_ret =0; agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); mpiReadGSTable(agRoot, &saRoot->mpiGSTable); if(smIS_SPCV_2_IOP(agRoot)) { if(saRoot->Iop1Tcnt_last == saRoot->mpiGSTable.Iop1Tcnt ) SA_DBG2(("siReadControllerStatus: Iop1 %d STUCK\n", saRoot->mpiGSTable.Iop1Tcnt)); } if( saRoot->MsguTcnt_last == saRoot->mpiGSTable.MsguTcnt || saRoot->IopTcnt_last == saRoot->mpiGSTable.IopTcnt ) { SA_DBG1(("siReadControllerStatus: Msgu %d Iop %d\n",saRoot->mpiGSTable.MsguTcnt, saRoot->mpiGSTable.IopTcnt)); saFatalInterruptHandler(agRoot, saRoot->swConfig.fatalErrorInterruptVector ); } SA_DBG2(("siReadControllerStatus: Msgu %d Iop %d\n",saRoot->mpiGSTable.MsguTcnt, saRoot->mpiGSTable.IopTcnt)); saRoot->MsguTcnt_last = saRoot->mpiGSTable.MsguTcnt; saRoot->IopTcnt_last = saRoot->mpiGSTable.IopTcnt; saRoot->Iop1Tcnt_last = saRoot->mpiGSTable.Iop1Tcnt; if(gPollForMissingInt) { mpiOCQueue_t *circularQ; SA_DBG4(("siReadControllerStatus: saRoot->sysIntsActive %X\n",saRoot->sysIntsActive)); circularQ = &saRoot->outboundQueue[0]; OSSA_READ_LE_32(circularQ->agRoot, &circularQ->producerIdx, circularQ->piPointer, 0); if(circularQ->producerIdx != circularQ->consumerIdx) { SA_DBG1(("siReadControllerStatus: saRoot->sysIntsActive %X\n",saRoot->sysIntsActive)); SA_DBG1(("siReadControllerStatus: PI 0x%03x CI 0x%03x\n",circularQ->producerIdx, circularQ->consumerIdx)); SA_DBG1(("siReadControllerStatus:IN MSGU_READ_ODMR %08X\n",siHalRegReadExt(agRoot, GEN_MSGU_ODMR, V_Outbound_Doorbell_Mask_Set_Register ))); SA_DBG1(("siReadControllerStatus:MSGU_READ_ODR %08X\n",siHalRegReadExt(agRoot, GEN_MSGU_ODR, V_Outbound_Doorbell_Set_Register))); ossaHwRegWriteExt(agRoot, 
PCIBAR0,V_Outbound_Doorbell_Clear_Register, 0xFFFFFFFF ); } } siTimerAdd(agRoot,SA_FW_TIMER_READS_STATUS_INTERVAL, siReadControllerStatus,Event,pParm ); return(to_ret); } #endif /* SA_FW_TIMER_READS_STATUS */ /******************************************************************************/ /*! \brief Routine to do SPC configuration with default or specified values * * Set up configuration table in LL Layer * * \param agRoot handles for this instance of SAS/SATA hardware * \param mpiConfig MPI Configuration * \param swConfig Pointer to the software configuration * * \return -void- */ /*******************************************************************************/ GLOBAL bit32 siConfiguration( agsaRoot_t *agRoot, mpiConfig_t *mpiConfig, agsaHwConfig_t *hwConfig, agsaSwConfig_t *swConfig ) { agsaQueueConfig_t *queueConfig; bit32 intOption, enable64 = 0; bit8 i; /* sanity check */ SA_ASSERT( (agNULL != agRoot), ""); smTraceFuncEnter(hpDBG_VERY_LOUD,"m2"); si_memset(mpiConfig, 0, sizeof(mpiConfig_t)); SA_DBG1(("siConfiguration: si_memset mpiConfig\n")); #if defined(SALLSDK_DEBUG) sidump_swConfig(swConfig); #endif mpiConfig->mainConfig.custset = swConfig->FWConfig; SA_DBG1(("siConfiguration:custset %8X %8X\n",mpiConfig->mainConfig.custset,swConfig->FWConfig)); if (swConfig->param3 == agNULL) { SA_DBG1(("siConfiguration: swConfig->param3 == agNULL\n")); /* initialize the mpiConfig */ /* We configure the Host main part of configuration table */ mpiConfig->mainConfig.iQNPPD_HPPD_GEvent = 0; mpiConfig->mainConfig.outboundHWEventPID0_3 = 0; mpiConfig->mainConfig.outboundHWEventPID4_7 = 0; mpiConfig->mainConfig.outboundNCQEventPID0_3 = 0; mpiConfig->mainConfig.outboundNCQEventPID4_7 = 0; mpiConfig->mainConfig.outboundTargetITNexusEventPID0_3 = 0; mpiConfig->mainConfig.outboundTargetITNexusEventPID4_7 = 0; mpiConfig->mainConfig.outboundTargetSSPEventPID0_3 = 0; mpiConfig->mainConfig.outboundTargetSSPEventPID4_7 = 0; mpiConfig->mainConfig.ioAbortDelay = 0; 
mpiConfig->mainConfig.upperEventLogAddress = 0; mpiConfig->mainConfig.lowerEventLogAddress = 0; mpiConfig->mainConfig.eventLogSize = MPI_LOGSIZE; mpiConfig->mainConfig.eventLogOption = 0; mpiConfig->mainConfig.upperIOPeventLogAddress = 0; mpiConfig->mainConfig.lowerIOPeventLogAddress = 0; mpiConfig->mainConfig.IOPeventLogSize = MPI_LOGSIZE; mpiConfig->mainConfig.IOPeventLogOption = 0; mpiConfig->mainConfig.FatalErrorInterrupt = 0; /* save the default value */ mpiConfig->numInboundQueues = AGSA_MAX_INBOUND_Q; mpiConfig->numOutboundQueues = AGSA_MAX_OUTBOUND_Q; mpiConfig->maxNumInboundQueues = AGSA_MAX_INBOUND_Q; mpiConfig->maxNumOutboundQueues = AGSA_MAX_OUTBOUND_Q; /* configure inbound queues */ for ( i = 0; i < AGSA_MAX_INBOUND_Q; i ++ ) { mpiConfig->inboundQueues[i].numElements = INBOUND_DEPTH_SIZE; mpiConfig->inboundQueues[i].elementSize = IOMB_SIZE64; mpiConfig->inboundQueues[i].priority = MPI_QUEUE_NORMAL; } /* configure outbound queues */ for ( i = 0; i < AGSA_MAX_OUTBOUND_Q; i ++ ) { mpiConfig->outboundQueues[i].numElements = OUTBOUND_DEPTH_SIZE; mpiConfig->outboundQueues[i].elementSize = IOMB_SIZE64; mpiConfig->outboundQueues[i].interruptVector = 0; mpiConfig->outboundQueues[i].interruptDelay = 0; mpiConfig->outboundQueues[i].interruptThreshold = 0; /* always enable OQ interrupt */ mpiConfig->outboundQueues[i].interruptEnable = 1; } } else { /* Parm3 is not null */ queueConfig = (agsaQueueConfig_t *)swConfig->param3; #if defined(SALLSDK_DEBUG) sidump_Q_config( queueConfig ); #endif SA_DBG1(("siConfiguration: swConfig->param3 == %p\n",queueConfig)); if ((queueConfig->numInboundQueues > AGSA_MAX_INBOUND_Q) || (queueConfig->numOutboundQueues > AGSA_MAX_OUTBOUND_Q)) { smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m2"); SA_DBG1(("siConfiguration:AGSA_RC_FAILURE MAX_Q\n")); return AGSA_RC_FAILURE; } if ((queueConfig->numInboundQueues == 0 || queueConfig->numOutboundQueues == 0 )) { smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m2"); SA_DBG1(("siConfiguration:AGSA_RC_FAILURE 
NO_Q\n")); return AGSA_RC_FAILURE; } mpiConfig->mainConfig.eventLogSize = swConfig->sizefEventLog1 * KBYTES; mpiConfig->mainConfig.eventLogOption = swConfig->eventLog1Option; mpiConfig->mainConfig.IOPeventLogSize = swConfig->sizefEventLog2 * KBYTES; mpiConfig->mainConfig.IOPeventLogOption = swConfig->eventLog2Option; if ((queueConfig->numInboundQueues > IQ_NUM_32) || (queueConfig->numOutboundQueues > OQ_NUM_32)) { enable64 = 1; } if (agNULL == hwConfig) { intOption = 0; } else { #if defined(SALLSDK_DEBUG) sidump_hwConfig(hwConfig); #endif if(smIS_SPCV(agRoot)) { intOption = 0; } else { intOption = hwConfig->intReassertionOption & INT_OPTION; } } /* Enable SGPIO */ swConfig->sgpioSupportEnable = 1; /* set bit for normal priority or high priority path */ /* set fatal error interrupt enable and vector */ /* set Interrupt Reassertion enable and 64 IQ/OQ enable */ mpiConfig->mainConfig.FatalErrorInterrupt = (swConfig->fatalErrorInterruptEnable) /* bit 0*/ | (hwConfig == agNULL ? 0: (hwConfig->hwOption & HW_CFG_PICI_EFFECTIVE_ADDRESS ? (0x1 << SHIFT1): 0))| (swConfig->sgpioSupportEnable ? (0x1 << SHIFT2): 0) | /* compile option SA_ENABLE_POISION_TLP */(SA_PTNFE_POISION_TLP << SHIFT3) | #ifdef SA_CONFIG_MDFD_REGISTRY (swConfig->disableMDF ? 
(0x1 << SHIFT4): 0) | #else /* compile option SA_DISABLE_MDFD */ (SA_MDFD_MULTI_DATA_FETCH << SHIFT4) | #endif /*SA_CONFIG_MDFD_REGISTRY*/ /* compile option SA_DISABLE_OB_COAL */(SA_OUTBOUND_COALESCE << SHIFT5) | /* compile option SA_ENABLE_ARBTE */(SA_ARBTE << SHIFT6) | ((swConfig->fatalErrorInterruptVector & FATAL_ERROR_INT_BITS) << SHIFT8) | (enable64 << SHIFT16) | (intOption << SHIFT17); SA_DBG1(("siConfiguration: swConfig->fatalErrorInterruptEnable %X\n",swConfig->fatalErrorInterruptEnable)); SA_DBG1(("siConfiguration: swConfig->fatalErrorInterruptVector %X\n",swConfig->fatalErrorInterruptVector)); /* initialize the mpiConfig */ /* We configure the Host main part of configuration table */ mpiConfig->mainConfig.outboundTargetITNexusEventPID0_3 = 0; mpiConfig->mainConfig.outboundTargetITNexusEventPID4_7 = 0; mpiConfig->mainConfig.outboundTargetSSPEventPID0_3 = 0; mpiConfig->mainConfig.outboundTargetSSPEventPID4_7 = 0; mpiConfig->mainConfig.ioAbortDelay = 0; mpiConfig->mainConfig.PortRecoveryTimerPortResetTimer = swConfig->PortRecoveryResetTimer; /* get parameter from queueConfig */ mpiConfig->mainConfig.iQNPPD_HPPD_GEvent = queueConfig->iqNormalPriorityProcessingDepth | (queueConfig->iqHighPriorityProcessingDepth << SHIFT8) | (queueConfig->generalEventQueue << SHIFT16) | (queueConfig->tgtDeviceRemovedEventQueue << SHIFT24); mpiConfig->mainConfig.outboundHWEventPID0_3 = queueConfig->sasHwEventQueue[0] | (queueConfig->sasHwEventQueue[1] << SHIFT8) | (queueConfig->sasHwEventQueue[2] << SHIFT16) | (queueConfig->sasHwEventQueue[3] << SHIFT24); mpiConfig->mainConfig.outboundHWEventPID4_7 = queueConfig->sasHwEventQueue[4] | (queueConfig->sasHwEventQueue[5] << SHIFT8) | (queueConfig->sasHwEventQueue[6] << SHIFT16) | (queueConfig->sasHwEventQueue[7] << SHIFT24); mpiConfig->mainConfig.outboundNCQEventPID0_3 = queueConfig->sataNCQErrorEventQueue[0] | (queueConfig->sataNCQErrorEventQueue[1] << SHIFT8) | (queueConfig->sataNCQErrorEventQueue[2] << SHIFT16) | 
(queueConfig->sataNCQErrorEventQueue[3] << SHIFT24); mpiConfig->mainConfig.outboundNCQEventPID4_7 = queueConfig->sataNCQErrorEventQueue[4] | (queueConfig->sataNCQErrorEventQueue[5] << SHIFT8) | (queueConfig->sataNCQErrorEventQueue[6] << SHIFT16) | (queueConfig->sataNCQErrorEventQueue[7] << SHIFT24); /* save it */ mpiConfig->numInboundQueues = queueConfig->numInboundQueues; mpiConfig->numOutboundQueues = queueConfig->numOutboundQueues; mpiConfig->queueOption = queueConfig->queueOption; SA_DBG2(("siConfiguration: numInboundQueues=%d numOutboundQueues=%d\n", queueConfig->numInboundQueues, queueConfig->numOutboundQueues)); /* configure inbound queues */ /* We configure the size of queue based on swConfig */ for( i = 0; i < queueConfig->numInboundQueues; i ++ ) { mpiConfig->inboundQueues[i].numElements = (bit16)queueConfig->inboundQueues[i].elementCount; - mpiConfig->inboundQueues[i].elementSize = (bit16)queueConfig->inboundQueues[i].elementSize;; + mpiConfig->inboundQueues[i].elementSize = (bit16)queueConfig->inboundQueues[i].elementSize; mpiConfig->inboundQueues[i].priority = queueConfig->inboundQueues[i].priority; SA_DBG2(("siConfiguration: IBQ%d:elementCount=%d elementSize=%d priority=%d Total Size 0x%X\n", i, queueConfig->inboundQueues[i].elementCount, queueConfig->inboundQueues[i].elementSize, queueConfig->inboundQueues[i].priority, queueConfig->inboundQueues[i].elementCount * queueConfig->inboundQueues[i].elementSize )); } /* configura outbound queues */ /* We configure the size of queue based on swConfig */ for( i = 0; i < queueConfig->numOutboundQueues; i ++ ) { mpiConfig->outboundQueues[i].numElements = (bit16)queueConfig->outboundQueues[i].elementCount; mpiConfig->outboundQueues[i].elementSize = (bit16)queueConfig->outboundQueues[i].elementSize; mpiConfig->outboundQueues[i].interruptVector = (bit8)queueConfig->outboundQueues[i].interruptVectorIndex; mpiConfig->outboundQueues[i].interruptDelay = (bit16)queueConfig->outboundQueues[i].interruptDelay; 
mpiConfig->outboundQueues[i].interruptThreshold = (bit8)queueConfig->outboundQueues[i].interruptCount; mpiConfig->outboundQueues[i].interruptEnable = (bit32)queueConfig->outboundQueues[i].interruptEnable; SA_DBG2(("siConfiguration: OBQ%d:elementCount=%d elementSize=%d interruptCount=%d interruptEnable=%d\n", i, queueConfig->outboundQueues[i].elementCount, queueConfig->outboundQueues[i].elementSize, queueConfig->outboundQueues[i].interruptCount, queueConfig->outboundQueues[i].interruptEnable)); } } SA_DBG1(("siConfiguration:mpiConfig->mainConfig.FatalErrorInterrupt 0x%X\n",mpiConfig->mainConfig.FatalErrorInterrupt)); SA_DBG1(("siConfiguration:swConfig->fatalErrorInterruptVector 0x%X\n",swConfig->fatalErrorInterruptVector)); SA_DBG1(("siConfiguration:enable64 0x%X\n",enable64)); SA_DBG1(("siConfiguration:PortRecoveryResetTimer 0x%X\n",swConfig->PortRecoveryResetTimer)); smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m2"); /* return */ return AGSA_RC_SUCCESS; } #ifdef FW_EVT_LOG_TST void saLogDump(agsaRoot_t *agRoot, U32 *eventLogSize, U32 **eventLogAddress_) { agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); //mpiConfig_t *mpiConfig = &saRoot->mpiConfig; mpiHostLLConfigDescriptor_t *mpiConfig = &saRoot->mainConfigTable; *eventLogAddress_ = (U32*)eventLogAddress; *eventLogSize = (U32)mpiConfig->eventLogSize; } #endif /*******************************************************************************/ /** \fn mpiInitialize(agsaRoot *agRoot, mpiMemReq_t* memoryAllocated, mpiConfig_t* config) * \brief Initializes the MPI Message Unit * \param agRoot Pointer to a data structure containing LL layer context handles * \param memoryAllocated Data structure that holds the different chunks of memory that are allocated * \param config MPI configuration * * This function is called to initialize SPC_HOST_MPI internal data structures and the SPC hardware. * This function is competed synch->ronously (there is no callback) * * Return: * AGSA_RC_SUCCESS if initialization succeeded. 
* AGSA_RC_FAILURE if initialization failed. */ /*******************************************************************************/ GLOBAL bit32 mpiInitialize(agsaRoot_t *agRoot, mpiMemReq_t* memoryAllocated, mpiConfig_t* config) { static spc_configMainDescriptor_t mainCfg; /* main part of MPI configuration */ static spc_inboundQueueDescriptor_t inQueueCfg; /* Inbound queue HW configuration structure */ static spc_outboundQueueDescriptor_t outQueueCfg; /* Outbound queue HW configuration structure */ bit16 qIdx, i, indexoffset; /* Queue index */ bit16 mIdx = 0; /* Memory region index */ bit32 MSGUCfgTblDWIdx, GSTLenMPIS; bit32 MSGUCfgTblBase, ret = AGSA_RC_SUCCESS; bit32 value, togglevalue; bit32 saveOffset; bit32 inboundoffset, outboundoffset; bit8 pcibar; bit16 maxinbound = AGSA_MAX_INBOUND_Q; bit16 maxoutbound = AGSA_MAX_OUTBOUND_Q; bit32 OB_CIPCIBar; bit32 IB_PIPCIBar; bit32 max_wait_time; bit32 max_wait_count; bit32 memOffset; agsaLLRoot_t *saRoot; mpiICQueue_t *circularIQ = agNULL; mpiOCQueue_t *circularOQ; bit32 mpiUnInitFailed = 0; bit32 mpiStartToggleFailed = 0; #if defined(SALLSDK_DEBUG) bit8 phycount = AGSA_MAX_VALID_PHYS; #endif /* SALLSDK_DEBUG */ SA_DBG1(("mpiInitialize: Entering\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); SA_ASSERT(NULL != memoryAllocated, "memoryAllocated argument cannot be null"); SA_ASSERT(NULL != config, "config argument cannot be null"); SA_ASSERT(0 == (sizeof(spc_inboundQueueDescriptor_t) % 4), "spc_inboundQueueDescriptor_t type size has to be divisible by 4"); saRoot = (agsaLLRoot_t *)(agRoot->sdkData); si_memset(&mainCfg,0,sizeof(spc_configMainDescriptor_t)); si_memset(&inQueueCfg,0,sizeof(spc_inboundQueueDescriptor_t)); si_memset(&outQueueCfg,0,sizeof(spc_outboundQueueDescriptor_t)); SA_ASSERT((agNULL !=saRoot ), ""); if(saRoot == agNULL) { SA_DBG1(("mpiInitialize: saRoot == agNULL\n")); return(AGSA_RC_FAILURE); } smTraceFuncEnter(hpDBG_VERY_LOUD,"m3"); /*Shift BAR 4 for SPC HAILEAH*/ if(smIS_SPC(agRoot)) 
{ if( smIS_HIL(agRoot)) { if (AGSA_RC_FAILURE == siBar4Shift(agRoot, MBIC_GSM_SM_BASE)) { SA_DBG1(("mpiInitialize: siBar4Shift FAILED ******************************************\n")); return AGSA_RC_FAILURE; } } } /* Wait for the SPC Configuration Table to be ready */ ret = mpiWaitForConfigTable(agRoot, &mainCfg); if (AGSA_RC_FAILURE == ret) { /* return error if MPI Configuration Table not ready */ SA_DBG1(("mpiInitialize: mpiWaitForConfigTable FAILED ******************************************\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m3"); return ret; } /* read scratch pad0 to get PCI BAR and offset of configuration table */ MSGUCfgTblBase = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); /* get PCI BAR */ MSGUCfgTblBase = (MSGUCfgTblBase & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* get pci Bar index */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, MSGUCfgTblBase); SA_DBG1(("mpiInitialize: MSGUCfgTblBase = 0x%x\n", MSGUCfgTblBase)); #if defined(SALLSDK_DEBUG) /* get Phy count from configuration table */ phycount = (bit8)((mainCfg.ContrlCapFlag & PHY_COUNT_BITS) >> SHIFT19); SA_DBG1(("mpiInitialize: Number of PHYs = 0x%x\n", phycount)); smTrace(hpDBG_VERY_LOUD,"70",phycount); /* TP:70 phycount */ #endif /* SALLSDK_DEBUG */ /* get High Priority IQ support flag */ if (mainCfg.ContrlCapFlag & HP_SUPPORT_BIT) { SA_DBG1(("mpiInitialize: High Priority IQ support from SPC\n")); } /* get Interrupt Coalescing Support flag */ if (mainCfg.ContrlCapFlag & INT_COL_BIT) { SA_DBG1(("mpiInitialize: Interrupt Coalescing support from SPC\n")); } /* get configured the number of inbound/outbound queues */ if (memoryAllocated->count == TOTAL_MPI_MEM_CHUNKS) { config->maxNumInboundQueues = AGSA_MAX_INBOUND_Q; config->maxNumOutboundQueues = AGSA_MAX_OUTBOUND_Q; } else { config->maxNumInboundQueues = config->numInboundQueues; config->maxNumOutboundQueues = config->numOutboundQueues; maxinbound = config->numInboundQueues; maxoutbound = config->numOutboundQueues; } 
SA_DBG1(("mpiInitialize: Number of IQ %d\n", maxinbound)); SA_DBG1(("mpiInitialize: Number of OQ %d\n", maxoutbound)); /* get inbound queue offset */ inboundoffset = mainCfg.inboundQueueOffset; /* get outbound queue offset */ outboundoffset = mainCfg.outboundQueueOffset; if(smIS_SPCV(agRoot)) { SA_DBG2(("mpiInitialize: Offset of IQ %d\n", (inboundoffset & 0xFF000000) >> 24)); SA_DBG2(("mpiInitialize: Offset of OQ %d\n", (outboundoffset & 0xFF000000) >> 24)); inboundoffset &= 0x00FFFFFF; outboundoffset &= 0x00FFFFFF; } /* get offset of the configuration table */ MSGUCfgTblDWIdx = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); MSGUCfgTblDWIdx = MSGUCfgTblDWIdx & SCRATCH_PAD0_OFFSET_MASK; saveOffset = MSGUCfgTblDWIdx; /* Checks if the configuration memory region size is the same as the mpiConfigMain */ if(memoryAllocated->region[mIdx].totalLength != sizeof(bit8) * config->mainConfig.eventLogSize) { SA_DBG1(("ERROR: The memory region [%d] 0x%X != 0x%X does not have the size of the MSGU event log ******************************************\n", mIdx,memoryAllocated->region[mIdx].totalLength,config->mainConfig.eventLogSize)); smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m3"); return AGSA_RC_FAILURE; } mainCfg.iQNPPD_HPPD_GEvent = config->mainConfig.iQNPPD_HPPD_GEvent; if(smIS_SPCV(agRoot)) { mainCfg.outboundHWEventPID0_3 = 0; mainCfg.outboundHWEventPID4_7 = 0; mainCfg.outboundNCQEventPID0_3 = 0; mainCfg.outboundNCQEventPID4_7 = 0; mainCfg.outboundTargetITNexusEventPID0_3 = 0; mainCfg.outboundTargetITNexusEventPID4_7 = 0; mainCfg.outboundTargetSSPEventPID0_3 = 0; mainCfg.outboundTargetSSPEventPID4_7 = 0; mainCfg.ioAbortDelay = 0; /* SPCV reserved */ mainCfg.custset = 0; mainCfg.portRecoveryResetTimer = config->mainConfig.PortRecoveryTimerPortResetTimer; SA_DBG1(("mpiInitialize:custset V %8X\n",mainCfg.custset)); SA_DBG1(("mpiInitialize:portRecoveryResetTimer V %8X\n",mainCfg.portRecoveryResetTimer)); mainCfg.interruptReassertionDelay = 
saRoot->hwConfig.intReassertionOption; SA_DBG1(("mpiInitialize:interruptReassertionDelay V %8X\n", mainCfg.interruptReassertionDelay)); } else { mainCfg.outboundHWEventPID0_3 = config->mainConfig.outboundHWEventPID0_3; mainCfg.outboundHWEventPID4_7 = config->mainConfig.outboundHWEventPID4_7; mainCfg.outboundNCQEventPID0_3 = config->mainConfig.outboundNCQEventPID0_3; mainCfg.outboundNCQEventPID4_7 = config->mainConfig.outboundNCQEventPID4_7; mainCfg.outboundTargetITNexusEventPID0_3 = config->mainConfig.outboundTargetITNexusEventPID0_3; mainCfg.outboundTargetITNexusEventPID4_7 = config->mainConfig.outboundTargetITNexusEventPID4_7; mainCfg.outboundTargetSSPEventPID0_3 = config->mainConfig.outboundTargetSSPEventPID0_3; mainCfg.outboundTargetSSPEventPID4_7 = config->mainConfig.outboundTargetSSPEventPID4_7; mainCfg.ioAbortDelay = config->mainConfig.ioAbortDelay; mainCfg.custset = config->mainConfig.custset; SA_DBG1(("mpiInitialize:custset spc %8X\n",mainCfg.custset)); } #ifdef FW_EVT_LOG_TST eventLogAddress = memoryAllocated->region[mIdx].virtPtr; #endif mainCfg.upperEventLogAddress = memoryAllocated->region[mIdx].physAddrUpper; mainCfg.lowerEventLogAddress = memoryAllocated->region[mIdx].physAddrLower; mainCfg.eventLogSize = config->mainConfig.eventLogSize; mainCfg.eventLogOption = config->mainConfig.eventLogOption; mIdx++; /* Checks if the configuration memory region size is the same as the mpiConfigMain */ if(memoryAllocated->region[mIdx].totalLength != sizeof(bit8) * config->mainConfig.IOPeventLogSize) { SA_DBG1(("ERROR: The memory region does not have the size of the IOP event log\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m3"); return AGSA_RC_FAILURE; } mainCfg.upperIOPeventLogAddress = memoryAllocated->region[mIdx].physAddrUpper; mainCfg.lowerIOPeventLogAddress = memoryAllocated->region[mIdx].physAddrLower; mainCfg.IOPeventLogSize = config->mainConfig.IOPeventLogSize; mainCfg.IOPeventLogOption = config->mainConfig.IOPeventLogOption; mainCfg.FatalErrorInterrupt = 
config->mainConfig.FatalErrorInterrupt; SA_DBG1(("mpiInitialize: iQNPPD_HPPD_GEvent 0x%x\n", mainCfg.iQNPPD_HPPD_GEvent)); if(smIS_SPCV(agRoot)) { } else { SA_DBG3(("mpiInitialize: outboundHWEventPID0_3 0x%x\n", mainCfg.outboundHWEventPID0_3)); SA_DBG3(("mpiInitialize: outboundHWEventPID4_7 0x%x\n", mainCfg.outboundHWEventPID4_7)); SA_DBG3(("mpiInitialize: outboundNCQEventPID0_3 0x%x\n", mainCfg.outboundNCQEventPID0_3)); SA_DBG3(("mpiInitialize: outboundNCQEventPID4_7 0x%x\n", mainCfg.outboundNCQEventPID4_7)); SA_DBG3(("mpiInitialize: outboundTargetITNexusEventPID0_3 0x%x\n", mainCfg.outboundTargetITNexusEventPID0_3)); SA_DBG3(("mpiInitialize: outboundTargetITNexusEventPID4_7 0x%x\n", mainCfg.outboundTargetITNexusEventPID4_7)); SA_DBG3(("mpiInitialize: outboundTargetSSPEventPID0_3 0x%x\n", mainCfg.outboundTargetSSPEventPID0_3)); SA_DBG3(("mpiInitialize: outboundTargetSSPEventPID4_7 0x%x\n", mainCfg.outboundTargetSSPEventPID4_7)); } SA_DBG3(("mpiInitialize: upperEventLogAddress 0x%x\n", mainCfg.upperEventLogAddress)); SA_DBG3(("mpiInitialize: lowerEventLogAddress 0x%x\n", mainCfg.lowerEventLogAddress)); SA_DBG3(("mpiInitialize: eventLogSize 0x%x\n", mainCfg.eventLogSize)); SA_DBG3(("mpiInitialize: eventLogOption 0x%x\n", mainCfg.eventLogOption)); #ifdef FW_EVT_LOG_TST SA_DBG3(("mpiInitialize: eventLogAddress 0x%p\n", eventLogAddress)); #endif SA_DBG3(("mpiInitialize: upperIOPLogAddress 0x%x\n", mainCfg.upperIOPeventLogAddress)); SA_DBG3(("mpiInitialize: lowerIOPLogAddress 0x%x\n", mainCfg.lowerIOPeventLogAddress)); SA_DBG3(("mpiInitialize: IOPeventLogSize 0x%x\n", mainCfg.IOPeventLogSize)); SA_DBG3(("mpiInitialize: IOPeventLogOption 0x%x\n", mainCfg.IOPeventLogOption)); SA_DBG3(("mpiInitialize: FatalErrorInterrupt 0x%x\n", mainCfg.FatalErrorInterrupt)); SA_DBG3(("mpiInitialize: HDAModeFlags 0x%x\n", mainCfg.HDAModeFlags)); SA_DBG3(("mpiInitialize: analogSetupTblOffset 0x%08x\n", mainCfg.analogSetupTblOffset)); saRoot->mainConfigTable.iQNPPD_HPPD_GEvent = 
mainCfg.iQNPPD_HPPD_GEvent; if(smIS_SPCV(agRoot)) { /* SPCV - reserved fields */ saRoot->mainConfigTable.outboundHWEventPID0_3 = 0; saRoot->mainConfigTable.outboundHWEventPID4_7 = 0; saRoot->mainConfigTable.outboundNCQEventPID0_3 = 0; saRoot->mainConfigTable.outboundNCQEventPID4_7 = 0; saRoot->mainConfigTable.outboundTargetITNexusEventPID0_3 = 0; saRoot->mainConfigTable.outboundTargetITNexusEventPID4_7 = 0; saRoot->mainConfigTable.outboundTargetSSPEventPID0_3 = 0; saRoot->mainConfigTable.outboundTargetSSPEventPID4_7 = 0; saRoot->mainConfigTable.ioAbortDelay = 0; saRoot->mainConfigTable.custset = 0; } else { saRoot->mainConfigTable.outboundHWEventPID0_3 = mainCfg.outboundHWEventPID0_3; saRoot->mainConfigTable.outboundHWEventPID4_7 = mainCfg.outboundHWEventPID4_7; saRoot->mainConfigTable.outboundNCQEventPID0_3 = mainCfg.outboundNCQEventPID0_3; saRoot->mainConfigTable.outboundNCQEventPID4_7 = mainCfg.outboundNCQEventPID4_7; saRoot->mainConfigTable.outboundTargetITNexusEventPID0_3 = mainCfg.outboundTargetITNexusEventPID0_3; saRoot->mainConfigTable.outboundTargetITNexusEventPID4_7 = mainCfg.outboundTargetITNexusEventPID4_7; saRoot->mainConfigTable.outboundTargetSSPEventPID0_3 = mainCfg.outboundTargetSSPEventPID0_3; saRoot->mainConfigTable.outboundTargetSSPEventPID4_7 = mainCfg.outboundTargetSSPEventPID4_7; saRoot->mainConfigTable.ioAbortDelay = mainCfg.ioAbortDelay; saRoot->mainConfigTable.custset = mainCfg.custset; } saRoot->mainConfigTable.upperEventLogAddress = mainCfg.upperEventLogAddress; saRoot->mainConfigTable.lowerEventLogAddress = mainCfg.lowerEventLogAddress; saRoot->mainConfigTable.eventLogSize = mainCfg.eventLogSize; saRoot->mainConfigTable.eventLogOption = mainCfg.eventLogOption; saRoot->mainConfigTable.upperIOPeventLogAddress = mainCfg.upperIOPeventLogAddress; saRoot->mainConfigTable.lowerIOPeventLogAddress = mainCfg.lowerIOPeventLogAddress; saRoot->mainConfigTable.IOPeventLogSize = mainCfg.IOPeventLogSize; saRoot->mainConfigTable.IOPeventLogOption = 
mainCfg.IOPeventLogOption; saRoot->mainConfigTable.FatalErrorInterrupt = mainCfg.FatalErrorInterrupt; if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved fields */ } else { saRoot->mainConfigTable.HDAModeFlags = mainCfg.HDAModeFlags; } saRoot->mainConfigTable.analogSetupTblOffset = mainCfg.analogSetupTblOffset; smTrace(hpDBG_VERY_LOUD,"71",mIdx); /* TP:71 71 mIdx */ ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IQNPPD_HPPD_OFFSET), mainCfg.iQNPPD_HPPD_GEvent); SA_DBG3(("mpiInitialize: Offset 0x%08x mainCfg.iQNPPD_HPPD_GEvent 0x%x\n", (bit32)(MSGUCfgTblDWIdx + MAIN_IQNPPD_HPPD_OFFSET), mainCfg.iQNPPD_HPPD_GEvent)); if(smIS_SPC6V(agRoot)) { if(smIsCfgVREV_B(agRoot)) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); SA_DBG1(("mpiInitialize:SPCV - MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE\n" )); } if(smIsCfgVREV_C(agRoot)) { SA_DBG1(("mpiInitialize:SPCV - END_TO_END_CRC On\n" )); } SA_DBG3(("mpiInitialize:SPCV - rest reserved field \n" )); ;/* SPCV - reserved field */ } else if(smIS_SPC(agRoot)) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_HW_EVENT_PID03_OFFSET), mainCfg.outboundHWEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_HW_EVENT_PID47_OFFSET), mainCfg.outboundHWEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_NCQ_EVENT_PID03_OFFSET), mainCfg.outboundNCQEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_NCQ_EVENT_PID47_OFFSET), mainCfg.outboundNCQEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_TITNX_EVENT_PID03_OFFSET), mainCfg.outboundTargetITNexusEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_TITNX_EVENT_PID47_OFFSET), mainCfg.outboundTargetITNexusEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_SSP_EVENT_PID03_OFFSET), 
mainCfg.outboundTargetSSPEventPID0_3); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_OB_SSP_EVENT_PID47_OFFSET), mainCfg.outboundTargetSSPEventPID4_7); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_CUSTOMER_SETTING), mainCfg.custset); }else { if(smIsCfgVREV_A(agRoot)) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); /* */ SA_DBG1(("mpiInitialize:SPCV12G - offset MAIN_IO_ABORT_DELAY 0x%x value MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE 0x%x\n",MAIN_IO_ABORT_DELAY ,MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE)); SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC OFF for rev A %d\n",smIsCfgVREV_A(agRoot) )); } else if(smIsCfgVREV_B(agRoot)) { SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC ON rev B %d ****************************\n",smIsCfgVREV_B(agRoot) )); /*ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); */ } else if(smIsCfgVREV_C(agRoot)) { SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC on rev C %d\n",smIsCfgVREV_C(agRoot) )); } else { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IO_ABORT_DELAY), MAIN_IO_ABORT_DELAY_END_TO_END_CRC_DISABLE); SA_DBG1(("mpiInitialize:SPCV12G - END_TO_END_CRC Off unknown rev 0x%x\n", ossaHwRegReadConfig32((agRoot), 8 ))); } } ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_HI), mainCfg.upperEventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_LO), mainCfg.lowerEventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_BUFF_SIZE), mainCfg.eventLogSize); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_OPTION), mainCfg.eventLogOption); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_HI), mainCfg.upperIOPeventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, 
(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_LO), mainCfg.lowerIOPeventLogAddress); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_BUFF_SIZE), mainCfg.IOPeventLogSize); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_OPTION), mainCfg.IOPeventLogOption); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_INTERRUPT), mainCfg.FatalErrorInterrupt); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_PRECTD_PRESETD), mainCfg.portRecoveryResetTimer); SA_DBG3(("mpiInitialize: Offset 0x%08x upperEventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_HI), mainCfg.upperEventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x lowerEventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_LO), mainCfg.lowerEventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x eventLogSize 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_BUFF_SIZE), mainCfg.eventLogSize )); SA_DBG3(("mpiInitialize: Offset 0x%08x eventLogOption 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_OPTION), mainCfg.eventLogOption )); SA_DBG3(("mpiInitialize: Offset 0x%08x upperIOPeventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_HI), mainCfg.upperIOPeventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x lowerIOPeventLogAddress 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_LO), mainCfg.lowerIOPeventLogAddress )); SA_DBG3(("mpiInitialize: Offset 0x%08x IOPeventLogSize 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_BUFF_SIZE), mainCfg.IOPeventLogSize )); SA_DBG3(("mpiInitialize: Offset 0x%08x IOPeventLogOption 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_OPTION), mainCfg.IOPeventLogOption )); SA_DBG3(("mpiInitialize: Offset 0x%08x FatalErrorInterrupt 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_INTERRUPT), mainCfg.FatalErrorInterrupt )); SA_DBG3(("mpiInitialize: Offset 0x%08x PortRecoveryResetTimer 
0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_PRECTD_PRESETD), mainCfg.portRecoveryResetTimer )); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IRAD_RESERVED), mainCfg.interruptReassertionDelay); SA_DBG3(("mpiInitialize: Offset 0x%08x InterruptReassertionDelay 0x%x\n",(bit32)(MSGUCfgTblDWIdx + MAIN_IRAD_RESERVED), mainCfg.interruptReassertionDelay )); mIdx++; /* skip the ci and pi memory region */ mIdx++; mIdx++; smTrace(hpDBG_VERY_LOUD,"72",mIdx); /* TP:72 mIdx */ smTrace(hpDBG_VERY_LOUD,"Bc",maxinbound); /* TP:Bc maxinbound */ smTrace(hpDBG_VERY_LOUD,"Bd",pcibar); /* TP:Bd pcibar */ /* index offset */ indexoffset = 0; memOffset = 0; /* Memory regions for the inbound queues */ for(qIdx = 0; qIdx < maxinbound; qIdx++) { /* point back to the begin then plus offset to next queue */ smTrace(hpDBG_VERY_LOUD,"Bd",pcibar); /* TP:Bd pcibar */ MSGUCfgTblDWIdx = saveOffset; MSGUCfgTblDWIdx += inboundoffset; MSGUCfgTblDWIdx += (sizeof(spc_inboundQueueDescriptor_t) * qIdx); SA_DBG1(("mpiInitialize: A saveOffset 0x%x MSGUCfgTblDWIdx 0x%x\n",saveOffset ,MSGUCfgTblDWIdx)); /* if the MPI configuration says that this queue is disabled ... */ if(0 == config->inboundQueues[qIdx].numElements) { /* ... Clears the configuration table for this queue */ inQueueCfg.elementPriSizeCount= 0; inQueueCfg.upperBaseAddress = 0; inQueueCfg.lowerBaseAddress = 0; inQueueCfg.ciUpperBaseAddress = 0; inQueueCfg.ciLowerBaseAddress = 0; /* skip inQueueCfg.PIPCIBar (PM8000 write access) */ /* skip inQueueCfg.PIOffset (PM8000 write access) */ /* Update the inbound configuration table in SPC GSM */ mpiUpdateIBQueueCfgTable(agRoot, &inQueueCfg, MSGUCfgTblDWIdx, pcibar); } /* If the queue is enabled, then ... */ else { bit32 memSize = config->inboundQueues[qIdx].numElements * config->inboundQueues[qIdx].elementSize; bit32 remainder = memSize & 127; /* Calculate the size of this queue padded to 128 bytes */ if (remainder > 0) { memSize += (128 - remainder); } /* ... 
first checks that the memory region has the right size */ if( (memoryAllocated->region[mIdx].totalLength - memOffset < memSize) || (NULL == memoryAllocated->region[mIdx].virtPtr) || (0 == memoryAllocated->region[mIdx].totalLength)) { SA_DBG1(("mpiInitialize: ERROR The memory region does not have the right size for this inbound queue")); smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "m3"); return AGSA_RC_FAILURE; } else { /* Then, using the MPI configuration argument, initializes the corresponding element on the saRoot */ saRoot->inboundQueue[qIdx].numElements = config->inboundQueues[qIdx].numElements; saRoot->inboundQueue[qIdx].elementSize = config->inboundQueues[qIdx].elementSize; saRoot->inboundQueue[qIdx].priority = config->inboundQueues[qIdx].priority; si_memcpy(&saRoot->inboundQueue[qIdx].memoryRegion, &memoryAllocated->region[mIdx], sizeof(mpiMem_t)); saRoot->inboundQueue[qIdx].memoryRegion.virtPtr = (bit8 *)saRoot->inboundQueue[qIdx].memoryRegion.virtPtr + memOffset; saRoot->inboundQueue[qIdx].memoryRegion.physAddrLower += memOffset; saRoot->inboundQueue[qIdx].memoryRegion.elementSize = memSize; saRoot->inboundQueue[qIdx].memoryRegion.totalLength = memSize; saRoot->inboundQueue[qIdx].memoryRegion.numElements = 1; /* Initialize the local copy of PIs, CIs */ SA_DBG1(("mpiInitialize: queue %d PI CI zero\n",qIdx)); saRoot->inboundQueue[qIdx].producerIdx = 0; saRoot->inboundQueue[qIdx].consumerIdx = 0; saRoot->inboundQueue[qIdx].agRoot = agRoot; /* MPI memory region for inbound CIs are 2 */ saRoot->inboundQueue[qIdx].ciPointer = (((bit8 *)(memoryAllocated->region[MPI_CI_INDEX].virtPtr)) + qIdx * 4); /* ... 
and in the local structure we will use to copy to the HW configuration table */ /* CI base address */ inQueueCfg.elementPriSizeCount= config->inboundQueues[qIdx].numElements | (config->inboundQueues[qIdx].elementSize << SHIFT16) | (config->inboundQueues[qIdx].priority << SHIFT30); inQueueCfg.upperBaseAddress = saRoot->inboundQueue[qIdx].memoryRegion.physAddrUpper; inQueueCfg.lowerBaseAddress = saRoot->inboundQueue[qIdx].memoryRegion.physAddrLower; inQueueCfg.ciUpperBaseAddress = memoryAllocated->region[MPI_CI_INDEX].physAddrUpper; inQueueCfg.ciLowerBaseAddress = memoryAllocated->region[MPI_CI_INDEX].physAddrLower + qIdx * 4; /* write the configured data of inbound queue to SPC GSM */ mpiUpdateIBQueueCfgTable(agRoot, &inQueueCfg, MSGUCfgTblDWIdx, pcibar); /* get inbound PI PCI Bar and Offset */ /* get the PI PCI Bar offset and convert it to logical BAR */ IB_PIPCIBar = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + IB_PIPCI_BAR)); saRoot->inboundQueue[qIdx].PIPCIBar = mpiGetPCIBarIndex(agRoot, IB_PIPCIBar); saRoot->inboundQueue[qIdx].PIPCIOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + IB_PIPCI_BAR_OFFSET)); saRoot->inboundQueue[qIdx].qNumber = qIdx; memOffset += memSize; if ((0 == ((qIdx + 1) % MAX_QUEUE_EACH_MEM)) || (qIdx == (maxinbound - 1))) { mIdx++; indexoffset += MAX_QUEUE_EACH_MEM; memOffset = 0; } } /* else for memeory ok */ } /* queue enable */ } /* loop for inbound queue */ smTrace(hpDBG_VERY_LOUD,"73",0); /* TP:73 outbound queues */ /* index offset */ indexoffset = 0; memOffset = 0; /* Let's process the memory regions for the outbound queues */ for(qIdx = 0; qIdx < maxoutbound; qIdx++) { /* point back to the begin then plus offset to next queue */ MSGUCfgTblDWIdx = saveOffset; MSGUCfgTblDWIdx += outboundoffset; MSGUCfgTblDWIdx += (sizeof(spc_outboundQueueDescriptor_t) * qIdx); /* if the MPI configuration says that this queue is disabled ... */ if(0 == config->outboundQueues[qIdx].numElements) { /* ... 
Clears the configuration table for this queue */ outQueueCfg.upperBaseAddress = 0; outQueueCfg.lowerBaseAddress = 0; outQueueCfg.piUpperBaseAddress = 0; outQueueCfg.piLowerBaseAddress = 0; /* skip outQueueCfg.CIPCIBar = 0; read access only */ /* skip outQueueCfg.CIOffset = 0; read access only */ outQueueCfg.elementSizeCount = 0; outQueueCfg.interruptVecCntDelay = 0; /* Updated the configuration table in SPC GSM */ mpiUpdateOBQueueCfgTable(agRoot, &outQueueCfg, MSGUCfgTblDWIdx, pcibar); } /* If the outbound queue is enabled, then ... */ else { bit32 memSize = config->outboundQueues[qIdx].numElements * config->outboundQueues[qIdx].elementSize; bit32 remainder = memSize & 127; /* Calculate the size of this queue padded to 128 bytes */ if (remainder > 0) { memSize += (128 - remainder); } /* ... first checks that the memory region has the right size */ if((memoryAllocated->region[mIdx].totalLength - memOffset < memSize) || (NULL == memoryAllocated->region[mIdx].virtPtr) || (0 == memoryAllocated->region[mIdx].totalLength)) { SA_DBG1(("ERROR: The memory region does not have the right size for this outbound queue")); smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "m3"); return AGSA_RC_FAILURE; } else { /* Then, using the MPI configuration argument, initializes the corresponding element on the MPI context ... 
*/ saRoot->outboundQueue[qIdx].numElements = config->outboundQueues[qIdx].numElements; saRoot->outboundQueue[qIdx].elementSize = config->outboundQueues[qIdx].elementSize; si_memcpy(&saRoot->outboundQueue[qIdx].memoryRegion, &memoryAllocated->region[mIdx], sizeof(mpiMem_t)); saRoot->outboundQueue[qIdx].memoryRegion.virtPtr = (bit8 *)saRoot->outboundQueue[qIdx].memoryRegion.virtPtr + memOffset; saRoot->outboundQueue[qIdx].memoryRegion.physAddrLower += memOffset; saRoot->outboundQueue[qIdx].memoryRegion.elementSize = memSize; saRoot->outboundQueue[qIdx].memoryRegion.totalLength = memSize; saRoot->outboundQueue[qIdx].memoryRegion.numElements = 1; saRoot->outboundQueue[qIdx].producerIdx = 0; saRoot->outboundQueue[qIdx].consumerIdx = 0; saRoot->outboundQueue[qIdx].agRoot = agRoot; /* MPI memory region for outbound PIs are 3 */ saRoot->outboundQueue[qIdx].piPointer = (((bit8 *)(memoryAllocated->region[MPI_CI_INDEX + 1].virtPtr))+ qIdx * 4); /* ... and in the local structure we will use to copy to the HW configuration table */ outQueueCfg.upperBaseAddress = saRoot->outboundQueue[qIdx].memoryRegion.physAddrUpper; outQueueCfg.lowerBaseAddress = saRoot->outboundQueue[qIdx].memoryRegion.physAddrLower; /* PI base address */ outQueueCfg.piUpperBaseAddress = memoryAllocated->region[MPI_CI_INDEX + 1].physAddrUpper; outQueueCfg.piLowerBaseAddress = memoryAllocated->region[MPI_CI_INDEX + 1].physAddrLower + qIdx * 4; outQueueCfg.elementSizeCount = config->outboundQueues[qIdx].numElements | (config->outboundQueues[qIdx].elementSize << SHIFT16); /* enable/disable interrupt - use saSystemInterruptsActive() API */ /* instead of ossaHwRegWrite(agRoot, MSGU_ODMR, 0); */ /* Outbound Doorbell Auto disable */ /* LL does not use ossaHwRegWriteExt(agRoot, PCIBAR1, SPC_ODAR, 0xffffffff); */ if (config->outboundQueues[qIdx].interruptEnable) { /* enable interrupt flag bit30 of outbound table */ outQueueCfg.elementSizeCount |= OB_PROPERTY_INT_ENABLE; } if(smIS_SPCV(agRoot)) { 
outQueueCfg.interruptVecCntDelay = ((config->outboundQueues[qIdx].interruptVector & INT_VEC_BITS ) << SHIFT24); } else { outQueueCfg.interruptVecCntDelay = (config->outboundQueues[qIdx].interruptDelay & INT_DELAY_BITS) | ((config->outboundQueues[qIdx].interruptThreshold & INT_THR_BITS ) << SHIFT16) | ((config->outboundQueues[qIdx].interruptVector & INT_VEC_BITS ) << SHIFT24); } /* create a VectorIndex Bit Map */ if (qIdx < OQ_NUM_32) { saRoot->interruptVecIndexBitMap[config->outboundQueues[qIdx].interruptVector] |= (1 << qIdx); SA_DBG2(("mpiInitialize:below 32 saRoot->interruptVecIndexBitMap[config->outboundQueues[qIdx].interruptVector] 0x%08x\n",saRoot->interruptVecIndexBitMap[config->outboundQueues[qIdx].interruptVector])); } else { saRoot->interruptVecIndexBitMap1[config->outboundQueues[qIdx].interruptVector] |= (1 << (qIdx - OQ_NUM_32)); SA_DBG2(("mpiInitialize:Above 32 saRoot->interruptVecIndexBitMap1[config->outboundQueues[qIdx].interruptVector] 0x%08x\n",saRoot->interruptVecIndexBitMap1[config->outboundQueues[qIdx].interruptVector])); } /* Update the outbound configuration table */ mpiUpdateOBQueueCfgTable(agRoot, &outQueueCfg, MSGUCfgTblDWIdx, pcibar); /* read the CI PCIBar offset and convert it to logical bar */ OB_CIPCIBar = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + OB_CIPCI_BAR)); saRoot->outboundQueue[qIdx].CIPCIBar = mpiGetPCIBarIndex(agRoot, OB_CIPCIBar); saRoot->outboundQueue[qIdx].CIPCIOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + OB_CIPCI_BAR_OFFSET)); saRoot->outboundQueue[qIdx].DIntTOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + OB_DYNAMIC_COALES_OFFSET)); saRoot->outboundQueue[qIdx].qNumber = qIdx; memOffset += memSize; if ((0 == ((qIdx + 1) % MAX_QUEUE_EACH_MEM)) || (qIdx == (maxoutbound - 1))) { mIdx++; indexoffset += MAX_QUEUE_EACH_MEM; memOffset =0; } } } } /* calculate number of vectors */ saRoot->numInterruptVectors = 0; for (qIdx = 0; qIdx < MAX_NUM_VECTOR; qIdx++) { if 
((saRoot->interruptVecIndexBitMap[qIdx]) || (saRoot->interruptVecIndexBitMap1[qIdx])) { (saRoot->numInterruptVectors)++; } } SA_DBG2(("mpiInitialize:(saRoot->numInterruptVectors) 0x%x\n",(saRoot->numInterruptVectors))); if(smIS_SPCV(agRoot)) { /* setup interrupt vector table */ mpiWrIntVecTable(agRoot,config); } if(smIS_SPCV(agRoot)) { mpiWrAnalogSetupTable(agRoot,config); } /* setup phy analog registers */ mpiWriteCALAll(agRoot, &config->phyAnalogConfig); { bit32 pcibar = 0; bit32 TableOffset; pcibar = siGetPciBar(agRoot); TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); TableOffset &= SCRATCH_PAD0_OFFSET_MASK; SA_DBG1(("mpiInitialize: mpiContextTable TableOffset 0x%08X contains 0x%08X\n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ))); SA_ASSERT( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) == 0x53434D50), "Config table signiture"); SA_DBG1(("mpiInitialize: AGSA_MPI_MAIN_CONFIGURATION_TABLE 0x%08X\n", 0)); SA_DBG1(("mpiInitialize: AGSA_MPI_GENERAL_STATUS_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_GST_OFFSET) & 0xFFFF ))); SA_DBG1(("mpiInitialize: AGSA_MPI_INBOUND_QUEUE_CONFIGURATION_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_IBQ_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_CONFIGURATION_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_OBQ_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_SAS_PHY_ANALOG_SETUP_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_ANALOG_SETUP_OFFSET) & 0xFFFF ))); SA_DBG1(("mpiInitialize: AGSA_MPI_INTERRUPT_VECTOR_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_INT_VEC_TABLE_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_PER_SAS_PHY_ATTRIBUTE_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_PHY_ATTRIBUTE_OFFSET) & 0xFFFF))); SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_FAILOVER_TABLE 0x%08X\n", (ossaHwRegReadExt(agRoot, 
pcibar, TableOffset+MAIN_MOQFOT_MOQFOES) & 0xFFFF))); } if(agNULL != saRoot->swConfig.mpiContextTable ) { agsaMPIContext_t * context = (agsaMPIContext_t * )saRoot->swConfig.mpiContextTable; bit32 length = saRoot->swConfig.mpiContextTablelen; bit32 pcibar = 0; bit32 TableOffset; pcibar = siGetPciBar(agRoot); TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); TableOffset &= SCRATCH_PAD0_OFFSET_MASK; SA_DBG1(("mpiInitialize: mpiContextTable TableOffset 0x%08X contains 0x%08X\n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ))); SA_ASSERT( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) == 0x53434D50), "Config table signiture"); if ( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) != 0x53434D50)) { SA_DBG1(("mpiInitialize: TableOffset 0x%x reads 0x%x expect 0x%x \n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ),0x53434D50)); } if(context ) { SA_DBG1(("mpiInitialize: MPITableType 0x%x context->offset 0x%x context->value 0x%x\n",context->MPITableType,context->offset,context->value)); while( length != 0) { switch(context->MPITableType) { bit32 OffsetInMain; case AGSA_MPI_MAIN_CONFIGURATION_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_MAIN_CONFIGURATION_TABLE %d 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset, context->offset, context->value)); OffsetInMain = TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4) , context->value); break; case AGSA_MPI_GENERAL_STATUS_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_GENERAL_STATUS_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType ,TableOffset+MAIN_GST_OFFSET, context->offset, context->value )); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_GST_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_INBOUND_QUEUE_CONFIGURATION_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_INBOUND_QUEUE_CONFIGURATION_TABLE %d offset 0x%x + 0x%x = 
0x%x\n",context->MPITableType,TableOffset+MAIN_IBQ_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_IBQ_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_OUTBOUND_QUEUE_CONFIGURATION_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_CONFIGURATION_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_OBQ_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_OBQ_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_SAS_PHY_ANALOG_SETUP_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_SAS_PHY_ANALOG_SETUP_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_ANALOG_SETUP_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+ MAIN_ANALOG_SETUP_OFFSET) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_INTERRUPT_VECTOR_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_INTERRUPT_VECTOR_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_INT_VEC_TABLE_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+ MAIN_INT_VEC_TABLE_OFFSET) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; case AGSA_MPI_PER_SAS_PHY_ATTRIBUTE_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_PER_SAS_PHY_ATTRIBUTE_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_PHY_ATTRIBUTE_OFFSET, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_PHY_ATTRIBUTE_OFFSET ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + 
(context->offset * 4), context->value); break; case AGSA_MPI_OUTBOUND_QUEUE_FAILOVER_TABLE: SA_DBG1(("mpiInitialize: AGSA_MPI_OUTBOUND_QUEUE_FAILOVER_TABLE %d offset 0x%x + 0x%x = 0x%x\n",context->MPITableType,TableOffset+MAIN_MOQFOT_MOQFOES, context->offset, context->value)); OffsetInMain = (ossaHwRegReadExt(agRoot, pcibar, TableOffset+MAIN_MOQFOT_MOQFOES ) & 0xFFFF) + TableOffset; ossaHwRegWriteExt(agRoot, pcibar, OffsetInMain + (context->offset * 4), context->value); break; default: SA_DBG1(("mpiInitialize: error MPITableType unknown %d offset 0x%x value 0x%x\n",context->MPITableType, context->offset, context->value)); break; } if(smIS_SPC12V(agRoot)) { if (saRoot->ControllerInfo.fwInterfaceRev > 0x301 ) { SA_DBG1(("mpiInitialize: MAIN_AWT_MIDRANGE 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, TableOffset + MAIN_AWT_MIDRANGE) )); } } if(length >= sizeof(agsaMPIContext_t)) { length -= sizeof(agsaMPIContext_t); context++; } else { length = 0; } } } SA_DBG1(("mpiInitialize: context %p saRoot->swConfig.mpiContextTable %p %d\n",context,saRoot->swConfig.mpiContextTable,context == saRoot->swConfig.mpiContextTable ? 1 : 0)); if ( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) != 0x53434D50)) { SA_DBG1(("mpiInitialize:TableOffset 0x%x reads 0x%x expect 0x%x \n",TableOffset,ossaHwRegReadExt(agRoot, pcibar, TableOffset ),0x53434D50)); } SA_ASSERT( (ossaHwRegReadExt(agRoot, pcibar, TableOffset ) == 0x53434D50), "Config table signiture After"); } /* At this point the Message Unit configuration table is set up. 
Now we need to ring the doorbell */ togglevalue = 0; smTrace(hpDBG_VERY_LOUD,"74", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET )); /* TP:74 Doorbell */ /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the table is updated */ siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); if(siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET ) & SPC_MSGU_CFG_TABLE_UPDATE) { SA_DBG1(("mpiInitialize: SPC_MSGU_CFG_TABLE_UPDATE (0x%X) \n", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET))); } else { SA_DBG1(("mpiInitialize: SPC_MSGU_CFG_TABLE_UPDATE not set (0x%X)\n", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET))); ossaStallThread(agRoot, WAIT_INCREMENT); } smTrace(hpDBG_VERY_LOUD,"A5", siHalRegReadExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET )); /* TP:A5 Doorbell */ /* // ossaHwRegWrite(agRoot, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE); MSGU_WRITE_IDR(SPC_MSGU_CFG_TABLE_UPDATE); */ /* wait until Inbound DoorBell Clear Register toggled */ WaitLonger: max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); value = MSGU_READ_IDR; value &= SPC_MSGU_CFG_TABLE_UPDATE; } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT)); smTrace(hpDBG_VERY_LOUD,"80", max_wait_count); /* TP:80 TP max_wait_count */ if (!max_wait_count && mpiStartToggleFailed < 5 ) { SA_DBG1(("mpiInitialize: mpiStartToggleFailed count %d\n", mpiStartToggleFailed)); mpiStartToggleFailed++; goto WaitLonger; } if (!max_wait_count ) { SA_DBG1(("mpiInitialize: TIMEOUT:IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue)); MSGUCfgTblDWIdx = saveOffset; GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)MSGUCfgTblDWIdx + (bit32)(mainCfg.GSTOffset + GST_GSTLEN_MPIS_OFFSET)); SA_DBG1(("mpiInitialize: MPI State = 0x%x\n", GSTLenMPIS)); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "m3"); return AGSA_RC_FAILURE; } 
smTrace(hpDBG_VERY_LOUD,"81", mpiStartToggleFailed ); /* TP:81 TP */ /* check the MPI-State for initialization */ MSGUCfgTblDWIdx = saveOffset; GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)MSGUCfgTblDWIdx + (bit32)(mainCfg.GSTOffset + GST_GSTLEN_MPIS_OFFSET)); if ( (GST_MPI_STATE_UNINIT == (GSTLenMPIS & GST_MPI_STATE_MASK)) && ( mpiUnInitFailed < 5 ) ) { SA_DBG1(("mpiInitialize: MPI State = 0x%x mpiUnInitFailed count %d\n", GSTLenMPIS & GST_MPI_STATE_MASK,mpiUnInitFailed)); ossaStallThread(agRoot, (20 * 1000)); mpiUnInitFailed++; goto WaitLonger; } if (GST_MPI_STATE_INIT != (GSTLenMPIS & GST_MPI_STATE_MASK)) { SA_DBG1(("mpiInitialize: Error Not GST_MPI_STATE_INIT MPI State = 0x%x\n", GSTLenMPIS & GST_MPI_STATE_MASK)); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"82", 0); /* TP:82 TP */ /* check MPI Initialization error */ GSTLenMPIS = GSTLenMPIS >> SHIFT16; if (0x0000 != GSTLenMPIS) { SA_DBG1(("mpiInitialize: MPI Error = 0x%x\n", GSTLenMPIS)); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"83", 0); /* TP:83 TP */ /* reread IQ PI offset from SPC if IQ/OQ > 32 */ if ((maxinbound > IQ_NUM_32) || (maxoutbound > OQ_NUM_32)) { for(qIdx = 0; qIdx < maxinbound; qIdx++) { /* point back to the begin then plus offset to next queue */ MSGUCfgTblDWIdx = saveOffset; MSGUCfgTblDWIdx += inboundoffset; MSGUCfgTblDWIdx += (sizeof(spc_inboundQueueDescriptor_t) * qIdx); saRoot->inboundQueue[qIdx].PIPCIOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + IB_PIPCI_BAR_OFFSET)); } } smTrace(hpDBG_VERY_LOUD,"84", 0); /* TP:84 TP */ /* at least one inbound queue and one outbound queue enabled */ if ((0 == config->inboundQueues[0].numElements) || (0 == config->outboundQueues[0].numElements)) { SA_DBG1(("mpiInitialize: Error,IQ0 or OQ0 have to enable\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "m3"); return AGSA_RC_FAILURE; } smTrace(hpDBG_VERY_LOUD,"85", 0); /* TP:85 TP 
*/ /* clean the inbound queues */ for (i = 0; i < config->numInboundQueues; i ++) { if(0 != config->inboundQueues[i].numElements) { circularIQ = &saRoot->inboundQueue[i]; si_memset(circularIQ->memoryRegion.virtPtr, 0, circularIQ->memoryRegion.totalLength); si_memset(saRoot->inboundQueue[i].ciPointer, 0, sizeof(bit32)); if(smIS_SPCV(agRoot)) { ossaHwRegWriteExt(circularIQ->agRoot, circularIQ->PIPCIBar, circularIQ->PIPCIOffset, 0); SA_DBG1(("mpiInitialize: SPC V writes IQ %2d offset 0x%x\n",i ,circularIQ->PIPCIOffset)); } } } smTrace(hpDBG_VERY_LOUD,"86", 0); /* TP:86 TP */ /* clean the outbound queues */ for (i = 0; i < config->numOutboundQueues; i ++) { if(0 != config->outboundQueues[i].numElements) { circularOQ = &saRoot->outboundQueue[i]; si_memset(circularOQ->memoryRegion.virtPtr, 0, circularOQ->memoryRegion.totalLength); si_memset(saRoot->outboundQueue[i].piPointer, 0, sizeof(bit32)); if(smIS_SPCV(agRoot)) { ossaHwRegWriteExt(circularOQ->agRoot, circularOQ->CIPCIBar, circularOQ->CIPCIOffset, 0); SA_DBG2(("mpiInitialize: SPC V writes OQ %2d offset 0x%x\n",i ,circularOQ->CIPCIOffset)); } } } smTrace(hpDBG_VERY_LOUD,"75",0); /* TP:75 AAP1 IOP */ /* read back AAP1 and IOP event log address and size */ MSGUCfgTblDWIdx = saveOffset; value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_HI)); saRoot->mainConfigTable.upperEventLogAddress = value; SA_DBG1(("mpiInitialize: upperEventLogAddress 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_ADDR_LO)); saRoot->mainConfigTable.lowerEventLogAddress = value; SA_DBG1(("mpiInitialize: lowerEventLogAddress 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_BUFF_SIZE)); saRoot->mainConfigTable.eventLogSize = value; SA_DBG1(("mpiInitialize: eventLogSize 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_EVENT_LOG_OPTION)); 
saRoot->mainConfigTable.eventLogOption = value; SA_DBG1(("mpiInitialize: eventLogOption 0x%x\n", value)); SA_DBG1(("mpiInitialize: EventLog dd /p %08X`%08X L %x\n",saRoot->mainConfigTable.upperEventLogAddress,saRoot->mainConfigTable.lowerEventLogAddress,saRoot->mainConfigTable.eventLogSize/4 )); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_HI)); saRoot->mainConfigTable.upperIOPeventLogAddress = value; SA_DBG1(("mpiInitialize: upperIOPLogAddress 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_ADDR_LO)); saRoot->mainConfigTable.lowerIOPeventLogAddress = value; SA_DBG1(("mpiInitialize: lowerIOPLogAddress 0x%x\n", value)); SA_DBG1(("mpiInitialize: IOPLog dd /p %08X`%08X L %x\n",saRoot->mainConfigTable.upperIOPeventLogAddress,saRoot->mainConfigTable.lowerIOPeventLogAddress,saRoot->mainConfigTable.IOPeventLogSize/4 )); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_BUFF_SIZE)); saRoot->mainConfigTable.IOPeventLogSize = value; SA_DBG1(("mpiInitialize: IOPeventLogSize 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IOP_EVENT_LOG_OPTION)); saRoot->mainConfigTable.IOPeventLogOption = value; SA_DBG1(("mpiInitialize: IOPeventLogOption 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_INTERRUPT)); #ifdef SA_PRINTOUT_IN_WINDBG #ifndef DBG DbgPrint("mpiInitialize: EventLog (%d) dd /p %08X`%08X L %x\n", saRoot->mainConfigTable.eventLogOption, saRoot->mainConfigTable.upperEventLogAddress, saRoot->mainConfigTable.lowerEventLogAddress, saRoot->mainConfigTable.eventLogSize/4 ); DbgPrint("mpiInitialize: IOPLog (%d) dd /p %08X`%08X L %x\n", saRoot->mainConfigTable.IOPeventLogOption, saRoot->mainConfigTable.upperIOPeventLogAddress, saRoot->mainConfigTable.lowerIOPeventLogAddress, saRoot->mainConfigTable.IOPeventLogSize/4 ); #endif /* DBG */ #endif /* 
SA_PRINTOUT_IN_WINDBG */ saRoot->mainConfigTable.FatalErrorInterrupt = value; smTrace(hpDBG_VERY_LOUD,"76",value); /* TP:76 FatalErrorInterrupt */ SA_DBG1(("mpiInitialize: hwConfig->hwOption %X\n", saRoot->hwConfig.hwOption )); SA_DBG1(("mpiInitialize: FatalErrorInterrupt 0x%x\n", value)); /* read back Register Dump offset and length */ value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_OFFSET)); saRoot->mainConfigTable.FatalErrorDumpOffset0 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpOffset0 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_LENGTH)); saRoot->mainConfigTable.FatalErrorDumpLength0 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpLength0 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_OFFSET)); saRoot->mainConfigTable.FatalErrorDumpOffset1 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpOffset1 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_LENGTH)); saRoot->mainConfigTable.FatalErrorDumpLength1 = value; SA_DBG1(("mpiInitialize: FatalErrorDumpLength1 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_PRECTD_PRESETD)); saRoot->mainConfigTable.PortRecoveryTimerPortResetTimer = value; SA_DBG1(("mpiInitialize: PortRecoveryTimerPortResetTimer 0x%x\n", value)); value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(MSGUCfgTblDWIdx + MAIN_IRAD_RESERVED)); saRoot->mainConfigTable.InterruptReassertionDelay = value; SA_DBG1(("mpiInitialize: InterruptReassertionDelay 0x%x\n", value)); if(smIS_SPCV(agRoot)) { bit32 sp1; sp1= ossaHwRegRead(agRoot,V_Scratchpad_1_Register ); if(SCRATCH_PAD1_V_ERROR_STATE(sp1)) { SA_DBG1(("mpiInitialize: SCRATCH_PAD1_V_ERROR_STAT 0x%x\n",sp1 )); ret = AGSA_RC_FAILURE; } } smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "m3"); return ret; } 
/*******************************************************************************/ /** \fn mpiWaitForConfigTable(agsaRoot_t *agRoot, spc_configMainDescriptor_t *config) * \brief Reading and Writing the Configuration Table * \param agsaRoot Pointer to a data structure containing LL layer context handles * \param config Pointer to Configuration Table * * Return: * AGSA_RC_SUCCESS if read the configuration table from SPC sucessful * AGSA_RC_FAILURE if read the configuration table from SPC failed */ /*******************************************************************************/ GLOBAL bit32 mpiWaitForConfigTable(agsaRoot_t *agRoot, spc_configMainDescriptor_t *config) { agsaLLRoot_t *saRoot = (agsaLLRoot_t *)(agRoot->sdkData); bit32 MSGUCfgTblBase, ret = AGSA_RC_SUCCESS; bit32 CfgTblDWIdx; bit32 value, value1; bit32 max_wait_time; bit32 max_wait_count; bit32 Signature, ExpSignature; bit8 pcibar; SA_DBG2(("mpiWaitForConfigTable: Entering\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); smTraceFuncEnter(hpDBG_VERY_LOUD,"m4"); /* check error state */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value1 = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); if( smIS_SPC(agRoot) ) { SA_DBG1(("mpiWaitForConfigTable: Waiting for SPC FW becoming ready.P1 0x%X P2 0x%X\n",value,value1)); /* check AAP error */ if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) { /* error state */ SA_DBG1(("mpiWaitForConfigTable: AAP error state and code 0x%x, ScratchPad2=0x%x\n", value, value1)); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m4"); return AGSA_RC_FAILURE; } /* check IOP error */ if (SCRATCH_PAD2_ERR == (value1 & 
SCRATCH_PAD_STATE_MASK)) { /* error state */ SA_DBG1(("mpiWaitForConfigTable: IOP error state and code 0x%x, ScratchPad1=0x%x\n", value1, value)); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m4"); return AGSA_RC_FAILURE; } /* bit 4-31 of scratch pad1 should be zeros if it is not in error state */ #ifdef DONT_DO /* */ if (value & SCRATCH_PAD1_STATE_MASK) { /* error case */ SA_DBG1(("mpiWaitForConfigTable: wrong state failure, scratchPad1 0x%x\n", value)); SA_DBG1(("mpiWaitForConfigTable: ScratchPad0 AAP error code 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD2 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m4"); return AGSA_RC_FAILURE; } /* bit 4-31 of scratch pad2 should be zeros if it is not in error state */ if (value1 & SCRATCH_PAD2_STATE_MASK) { /* error case */ SA_DBG1(("mpiWaitForConfigTable: wrong state failure, scratchPad2 0x%x\n", value1)); SA_DBG1(("mpiWaitForConfigTable: ScratchPad3 IOP error code 0x%x\n",siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3) )); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD1 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "m4"); return AGSA_RC_FAILURE; } #endif 
/* DONT_DO */ /* checking the fw and IOP in ready state */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec timeout */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); /* wait until scratch pad 1 and 2 registers in ready state */ do { ossaStallThread(agRoot, WAIT_INCREMENT); value =siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1) & SCRATCH_PAD1_RDY; value1 =siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2) & SCRATCH_PAD2_RDY; if(smIS_SPCV(agRoot)) { SA_DBG1(("mpiWaitForConfigTable:VEN_DEV_SPCV force SCRATCH_PAD2 RDY 1 %08X 2 %08X\n" ,value,value1)); value1 =3; } if ((max_wait_count -= WAIT_INCREMENT) == 0) { SA_DBG1(("mpiWaitForConfigTable: Timeout!! SCRATCH_PAD1/2 value = 0x%x 0x%x\n", value, value1)); break; } } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY)); if (!max_wait_count) { SA_DBG1(("mpiWaitForConfigTable: timeout failure\n")); #if defined(SALLSDK_DEBUG) SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiWaitForConfigTable: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); #endif smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "m4"); return AGSA_RC_FAILURE; } }else { if(((value & SCRATCH_PAD1_V_BOOTSTATE_HDA_SEEPROM ) == SCRATCH_PAD1_V_BOOTSTATE_HDA_SEEPROM)) { SA_DBG1(("mpiWaitForConfigTable: HDA mode set in SEEPROM SP1 0x%X\n",value)); } if(((value & SCRATCH_PAD1_V_READY) != SCRATCH_PAD1_V_READY) || (value == 0xffffffff)) { SA_DBG1(("mpiWaitForConfigTable: Waiting for _V_ FW becoming ready.P1 0x%X P2 0x%X\n",value,value1)); /* checking the fw and IOP in ready state */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec timeout */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); /* wait until scratch pad 1 and 2 registers in ready state */ do { ossaStallThread(agRoot, WAIT_INCREMENT); value = siHalRegReadExt(agRoot, 
GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value1 = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); if ((max_wait_count -= WAIT_INCREMENT) == 0) { SA_DBG1(("mpiWaitForConfigTable: Timeout!! SCRATCH_PAD1/2 value = 0x%x 0x%x\n", value, value1)); return AGSA_RC_FAILURE; } } while (((value & SCRATCH_PAD1_V_READY) != SCRATCH_PAD1_V_READY) || (value == 0xffffffff)); } } SA_DBG1(("mpiWaitForConfigTable: FW Ready, SCRATCH_PAD1/2 value = 0x%x 0x%x\n", value, value1)); /* read scratch pad0 to get PCI BAR and offset of configuration table */ MSGUCfgTblBase = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); /* get offset */ CfgTblDWIdx = MSGUCfgTblBase & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ MSGUCfgTblBase = (MSGUCfgTblBase & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; if(smIS_SPC(agRoot)) { if( smIS_spc8081(agRoot)) { if (BAR4 != MSGUCfgTblBase) { SA_DBG1(("mpiWaitForConfigTable: smIS_spc8081 PCI BAR is not BAR4, bar=0x%x - failure\n", MSGUCfgTblBase)); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "m4"); return AGSA_RC_FAILURE; } } else { if (BAR5 != MSGUCfgTblBase) { SA_DBG1(("mpiWaitForConfigTable: PCI BAR is not BAR5, bar=0x%x - failure\n", MSGUCfgTblBase)); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "m4"); return AGSA_RC_FAILURE; } } } /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, MSGUCfgTblBase); /* read signature from the configuration table */ Signature = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx); /* Error return if the signature is not "PMCS" */ ExpSignature = ('P') | ('M' << SHIFT8) | ('C' << SHIFT16) | ('S' << SHIFT24); if (Signature != ExpSignature) { SA_DBG1(("mpiWaitForConfigTable: Signature value = 0x%x\n", Signature)); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "m4"); return AGSA_RC_FAILURE; } /* save Signature */ si_memcpy(&config->Signature, &Signature, sizeof(Signature)); /* read Interface Revsion from the configuration table */ config->InterfaceRev = ossaHwRegReadExt(agRoot, pcibar, 
(bit32)CfgTblDWIdx + MAIN_INTERFACE_REVISION); /* read FW Revsion from the configuration table */ config->FWRevision = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FW_REVISION); /* read Max Outstanding IO from the configuration table */ config->MaxOutstandingIO = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_MAX_OUTSTANDING_IO_OFFSET); /* read Max SGL and Max Devices from the configuration table */ config->MDevMaxSGL = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_MAX_SGL_OFFSET); /* read Controller Cap Flags from the configuration table */ config->ContrlCapFlag = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_CNTRL_CAP_OFFSET); /* read GST Table Offset from the configuration table */ config->GSTOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_GST_OFFSET); /* read Inbound Queue Offset from the configuration table */ config->inboundQueueOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_IBQ_OFFSET); /* read Outbound Queue Offset from the configuration table */ config->outboundQueueOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_OBQ_OFFSET); if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ } else { /* read HDA Flags from the configuration table */ config->HDAModeFlags = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_HDA_FLAGS_OFFSET); } /* read analog Setting offset from the configuration table */ config->analogSetupTblOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_ANALOG_SETUP_OFFSET); if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ /* read interrupt vector table offset */ config->InterruptVecTblOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_INT_VEC_TABLE_OFFSET); /* read phy attribute table offset */ config->phyAttributeTblOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_PHY_ATTRIBUTE_OFFSET); SA_DBG1(("mpiWaitForConfigTable: INT Vector Tble Offset = 0x%x\n", 
config->InterruptVecTblOffset)); SA_DBG1(("mpiWaitForConfigTable: Phy Attribute Tble Offset = 0x%x\n", config->phyAttributeTblOffset)); } else { ;/* SPC - Not used */ } /* read Error Dump Offset and Length */ config->FatalErrorDumpOffset0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_OFFSET); config->FatalErrorDumpLength0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP0_LENGTH); config->FatalErrorDumpOffset1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_OFFSET); config->FatalErrorDumpLength1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_FATAL_ERROR_RDUMP1_LENGTH); SA_DBG1(("mpiWaitForConfigTable: Interface Revision value = 0x%08x\n", config->InterfaceRev)); SA_DBG1(("mpiWaitForConfigTable: FW Revision value = 0x%08x\n", config->FWRevision)); if(smIS_SPC(agRoot)) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%08x\n", STSDK_LL_SPC_VERSION)); } if(smIS_SPC6V(agRoot)) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%08x\n",STSDK_LL_VERSION )); } if(smIS_SPC12V(agRoot)) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 
0x%08x\n",STSDK_LL_12G_VERSION )); } SA_DBG1(("mpiWaitForConfigTable: MaxOutstandingIO value = 0x%08x\n", config->MaxOutstandingIO)); SA_DBG1(("mpiWaitForConfigTable: MDevMaxSGL value = 0x%08x\n", config->MDevMaxSGL)); SA_DBG1(("mpiWaitForConfigTable: ContrlCapFlag value = 0x%08x\n", config->ContrlCapFlag)); SA_DBG1(("mpiWaitForConfigTable: GSTOffset value = 0x%08x\n", config->GSTOffset)); SA_DBG1(("mpiWaitForConfigTable: inboundQueueOffset value = 0x%08x\n", config->inboundQueueOffset)); SA_DBG1(("mpiWaitForConfigTable: outboundQueueOffset value = 0x%08x\n", config->outboundQueueOffset)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpOffset0 value = 0x%08x\n", config->FatalErrorDumpOffset0)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpLength0 value = 0x%08x\n", config->FatalErrorDumpLength0)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpOffset1 value = 0x%08x\n", config->FatalErrorDumpOffset1)); SA_DBG1(("mpiWaitForConfigTable: FatalErrorDumpLength1 value = 0x%08x\n", config->FatalErrorDumpLength1)); SA_DBG1(("mpiWaitForConfigTable: HDAModeFlags value = 0x%08x\n", config->HDAModeFlags)); SA_DBG1(("mpiWaitForConfigTable: analogSetupTblOffset value = 0x%08x\n", config->analogSetupTblOffset)); /* check interface version */ if(smIS_SPC6V(agRoot)) { if (config->InterfaceRev != STSDK_LL_INTERFACE_VERSION) { SA_DBG1(("mpiWaitForConfigTable: V sTSDK interface ver. 0x%x does not match InterfaceRev 0x%x warning!\n", STSDK_LL_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_UNTESTED; if ((config->InterfaceRev & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK) != (STSDK_LL_INTERFACE_VERSION & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK)) { SA_DBG1(("mpiWaitForConfigTable: V sTSDK interface ver. 
0x%x incompatible with InterfaceRev 0x%x warning!\n", STSDK_LL_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_INCOMPATIBLE; smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "m4"); return ret; } } } else if(smIS_SPC12V(agRoot)) { if (config->InterfaceRev != STSDK_LL_12G_INTERFACE_VERSION) { SA_DBG1(("mpiWaitForConfigTable: 12g V sTSDK interface ver. 0x%x does not match InterfaceRev 0x%x warning!\n", STSDK_LL_12G_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_UNTESTED; if ((config->InterfaceRev & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK) != (STSDK_LL_12G_INTERFACE_VERSION & STSDK_LL_INTERFACE_VERSION_IGNORE_MASK)) { SA_DBG1(("mpiWaitForConfigTable: V sTSDK interface ver. 0x%x incompatible with InterfaceRev 0x%x warning!\n", STSDK_LL_12G_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_INCOMPATIBLE; ret = AGSA_RC_VERSION_UNTESTED; smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "m4"); return ret; } } } else { if (config->InterfaceRev != STSDK_LL_OLD_INTERFACE_VERSION) { SA_DBG1(("mpiWaitForConfigTable: SPC sTSDK interface ver. 0x%08x not compatible with InterfaceRev 0x%x warning!\n", STSDK_LL_INTERFACE_VERSION, config->InterfaceRev)); ret = AGSA_RC_VERSION_INCOMPATIBLE; smTraceFuncExit(hpDBG_VERY_LOUD, 'k', "m4"); return ret; } } /* Check FW versions */ if(smIS_SPC6V(agRoot)) { SA_DBG1(("mpiWaitForConfigTable:6 sTSDK ver. sa.h 0x%08x config 0x%08x\n", STSDK_LL_VERSION, config->FWRevision)); /* check FW and LL sTSDK version */ if (config->FWRevision != MATCHING_V_FW_VERSION ) { if (config->FWRevision > MATCHING_V_FW_VERSION) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x hadn't tested with FW ver. 0x%08x warning!\n", STSDK_LL_VERSION, config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } else if (config->FWRevision < MIN_FW_SPCVE_VERSION_SUPPORTED) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x not compatible with FW ver. 
0x%08x warning!\n", STSDK_LL_VERSION, config->FWRevision)); ret = AGSA_RC_VERSION_INCOMPATIBLE; smTraceFuncExit(hpDBG_VERY_LOUD, 'l', "m4"); return ret; } else { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x mismatch with FW ver. 0x%08x warning!\n",STSDK_LL_VERSION , config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } } }else if(smIS_SPC12V(agRoot)) { SA_DBG1(("mpiWaitForConfigTable:12 sTSDK ver. sa.h 0x%08x config 0x%08x\n", STSDK_LL_12G_VERSION, config->FWRevision)); /* check FW and LL sTSDK version */ if (config->FWRevision != MATCHING_12G_V_FW_VERSION ) { if (config->FWRevision > MATCHING_12G_V_FW_VERSION) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x hadn't tested with FW ver. 0x%08x warning!\n", STSDK_LL_12G_VERSION, config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } else if (config->FWRevision < MIN_FW_12G_SPCVE_VERSION_SUPPORTED) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x not compatible with FW ver. 0x%08x warning!\n", STSDK_LL_12G_VERSION, config->FWRevision)); ret = AGSA_RC_VERSION_INCOMPATIBLE; smTraceFuncExit(hpDBG_VERY_LOUD, 'm', "m4"); return ret; } else { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x mismatch with FW ver. 0x%08x warning!\n",STSDK_LL_12G_VERSION , config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } } } else { if (config->FWRevision != MATCHING_SPC_FW_VERSION ) { if (config->FWRevision > MATCHING_SPC_FW_VERSION) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x hadn't tested with FW ver. 0x%08x warning!\n", STSDK_LL_SPC_VERSION, config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } else if (config->FWRevision < MIN_FW_SPC_VERSION_SUPPORTED) { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x not compatible with FW ver. 0x%08x warning!\n", STSDK_LL_SPC_VERSION, config->FWRevision)); ret = AGSA_RC_VERSION_INCOMPATIBLE; smTraceFuncExit(hpDBG_VERY_LOUD, 'n', "m4"); return ret; } else { SA_DBG1(("mpiWaitForConfigTable: sTSDK ver. 0x%x mismatch with FW ver. 
0x%08x warning!\n",STSDK_LL_SPC_VERSION , config->FWRevision)); ret = AGSA_RC_VERSION_UNTESTED; } } } SA_DBG1(("mpiWaitForConfigTable: ILA version 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_ILAT_ILAV_ILASMRN_ILAMRN_ILAMJN) )); if(smIS_SPC12V(agRoot)) { if (config->InterfaceRev > 0x301 ) { SA_DBG1(("mpiWaitForConfigTable: MAIN_INACTIVE_ILA_REVSION 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_INACTIVE_ILA_REVSION) )); SA_DBG1(("mpiWaitForConfigTable: MAIN_SEEPROM_REVSION 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_SEEPROM_REVSION) )); } } if(smIS_SPC12V(agRoot)) { if (config->InterfaceRev > 0x301 ) { SA_DBG1(("mpiWaitForConfigTable: MAIN_AWT_MIDRANGE 0x%08X\n", ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_AWT_MIDRANGE) )); } } if(smIS_SFC(agRoot)) { /* always success for SFC*/ ret = AGSA_RC_SUCCESS; } if (agNULL != saRoot) { /* save the information */ saRoot->ControllerInfo.signature = Signature; saRoot->ControllerInfo.fwInterfaceRev = config->InterfaceRev; if(smIS_SPCV(agRoot)) { saRoot->ControllerInfo.hwRevision = (ossaHwRegReadConfig32(agRoot,8) & 0xFF); SA_DBG1(("mpiWaitForConfigTable: hwRevision 0x%x\n",saRoot->ControllerInfo.hwRevision )); } else { saRoot->ControllerInfo.hwRevision = SPC_READ_DEV_REV; } saRoot->ControllerInfo.fwRevision = config->FWRevision; saRoot->ControllerInfo.ilaRevision = config->ilaRevision; saRoot->ControllerInfo.maxPendingIO = config->MaxOutstandingIO; saRoot->ControllerInfo.maxSgElements = config->MDevMaxSGL & 0xFFFF; saRoot->ControllerInfo.maxDevices = (config->MDevMaxSGL & MAX_DEV_BITS) >> SHIFT16; saRoot->ControllerInfo.queueSupport = config->ContrlCapFlag & Q_SUPPORT_BITS; saRoot->ControllerInfo.phyCount = (bit8)((config->ContrlCapFlag & PHY_COUNT_BITS) >> SHIFT19); saRoot->ControllerInfo.sasSpecsSupport = (config->ContrlCapFlag & SAS_SPEC_BITS) >> SHIFT25; SA_DBG1(("mpiWaitForConfigTable: MaxOutstandingIO 0x%x swConfig->maxActiveIOs 
0x%x\n", config->MaxOutstandingIO,saRoot->swConfig.maxActiveIOs )); if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ } else { saRoot->ControllerInfo.controllerSetting = (bit8)config->HDAModeFlags; } saRoot->ControllerInfo.sdkInterfaceRev = STSDK_LL_INTERFACE_VERSION; saRoot->ControllerInfo.sdkRevision = STSDK_LL_VERSION; saRoot->mainConfigTable.regDumpPCIBAR = pcibar; saRoot->mainConfigTable.FatalErrorDumpOffset0 = config->FatalErrorDumpOffset0; saRoot->mainConfigTable.FatalErrorDumpLength0 = config->FatalErrorDumpLength0; saRoot->mainConfigTable.FatalErrorDumpOffset1 = config->FatalErrorDumpOffset1; saRoot->mainConfigTable.FatalErrorDumpLength1 = config->FatalErrorDumpLength1; if(smIS_SPCV(agRoot)) { ;/* SPCV - reserved field */ } else { saRoot->mainConfigTable.HDAModeFlags = config->HDAModeFlags; } saRoot->mainConfigTable.analogSetupTblOffset = config->analogSetupTblOffset; if(smIS_SPCV(agRoot)) { saRoot->mainConfigTable.InterruptVecTblOffset = config->InterruptVecTblOffset; saRoot->mainConfigTable.phyAttributeTblOffset = config->phyAttributeTblOffset; saRoot->mainConfigTable.PortRecoveryTimerPortResetTimer = config->portRecoveryResetTimer; } SA_DBG1(("mpiWaitForConfigTable: Signature = 0x%x\n", Signature)); SA_DBG1(("mpiWaitForConfigTable: hwRevision = 0x%x\n", saRoot->ControllerInfo.hwRevision)); SA_DBG1(("mpiWaitForConfigTable: FW Revision = 0x%x\n", config->FWRevision)); SA_DBG1(("mpiWaitForConfigTable: Max Sgl = 0x%x\n", saRoot->ControllerInfo.maxSgElements)); SA_DBG1(("mpiWaitForConfigTable: Max Device = 0x%x\n", saRoot->ControllerInfo.maxDevices)); SA_DBG1(("mpiWaitForConfigTable: Queue Support = 0x%x\n", saRoot->ControllerInfo.queueSupport)); SA_DBG1(("mpiWaitForConfigTable: Phy Count = 0x%x\n", saRoot->ControllerInfo.phyCount)); SA_DBG1(("mpiWaitForConfigTable: sas Specs Support = 0x%x\n", saRoot->ControllerInfo.sasSpecsSupport)); } if(ret != AGSA_RC_SUCCESS ) { SA_DBG1(("mpiWaitForConfigTable: return 0x%x not AGSA_RC_SUCCESS warning!\n", ret)); } 
smTraceFuncExit(hpDBG_VERY_LOUD, 'o', "m4");
  return ret;
}

/*******************************************************************************/
/** \fn mpiUnInitConfigTable(agsaRoot_t *agRoot, spc_configMainDescriptor_t *config)
 *  \brief UnInitialization Configuration Table
 *  \param agsaRoot Pointer to a data structure containing LL layer context handles
 *
 * Requests MPI shutdown via the inbound doorbell, then polls the General
 * Status Table until the firmware reports the MPI state as uninitialized.
 *
 * Return:
 *         AGSA_RC_SUCCESS if Un-initialize the configuration table successful
 *         AGSA_RC_FAILURE if Un-initialize the configuration table failed
 */
/*******************************************************************************/
GLOBAL bit32 mpiUnInitConfigTable(agsaRoot_t *agRoot)
{
  bit32 MSGUCfgTblBase;
  bit32 CfgTblDWIdx, GSTOffset, GSTLenMPIS;
  bit32 value, togglevalue;
  bit32 max_wait_time;
  bit32 max_wait_count;
  bit8  pcibar;

  smTraceFuncEnter(hpDBG_VERY_LOUD,"m7");
  SA_DBG1(("mpiUnInitConfigTable: agRoot %p\n",agRoot));
  SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null");
  togglevalue = 0;

  /* read scratch pad0 to get PCI BAR and offset of configuration table */
  MSGUCfgTblBase =siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  /* all-ones from a PCI read means the device is no longer responding */
  if(MSGUCfgTblBase == 0xFFFFFFFF)
  {
    SA_DBG1(("mpiUnInitConfigTable: MSGUCfgTblBase = 0x%x AGSA_RC_FAILURE\n",MSGUCfgTblBase));
    return AGSA_RC_FAILURE;
  }
  /* get offset */
  CfgTblDWIdx = MSGUCfgTblBase & SCRATCH_PAD0_OFFSET_MASK;
  /* get PCI BAR */
  MSGUCfgTblBase = (MSGUCfgTblBase & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* convert the PCI BAR to logical bar number */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, MSGUCfgTblBase);

  /* Write bit 1 to Inbound DoorBell Register */
  siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);

  /* wait until Inbound DoorBell Clear Register toggled */
  max_wait_time = WAIT_SECONDS(gWait_2);  /* 2 sec */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    value = MSGU_READ_IDR;
    value &= SPC_MSGU_CFG_TABLE_RESET;
  } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT));

  if (!max_wait_count)
  {
    SA_DBG1(("mpiUnInitConfigTable: TIMEOUT:IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m7");
    /* Only SPC treats the doorbell timeout as fatal here; SPCV falls
     * through and still checks the MPI state below. */
    if(smIS_SPC(agRoot) )
    {
      return AGSA_RC_FAILURE;
    }
  }

  /* check the MPI-State for termination in progress */
  /* wait until Inbound DoorBell Clear Register toggled */
  max_wait_time = WAIT_SECONDS(gWait_2);  /* 2 sec */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  GSTOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + MAIN_GST_OFFSET);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    /* GSTOffset is read once above and never changes inside the loop; this
     * guard only catches the PCI-dead sentinel from that single read */
    if(GSTOffset == 0xFFFFFFFF)
    {
      SA_DBG1(("mpiUnInitConfigTable:AGSA_RC_FAILURE GSTOffset = 0x%x\n",GSTOffset));
      return AGSA_RC_FAILURE;
    }
    GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)CfgTblDWIdx + (bit32)(GSTOffset + GST_GSTLEN_MPIS_OFFSET));
    if (GST_MPI_STATE_UNINIT == (GSTLenMPIS & GST_MPI_STATE_MASK))
    {
      break;
    }
  } while (max_wait_count -= WAIT_INCREMENT);

  if (!max_wait_count)
  {
    SA_DBG1(("mpiUnInitConfigTable: TIMEOUT, MPI State = 0x%x\n", GSTLenMPIS & GST_MPI_STATE_MASK));
#if defined(SALLSDK_DEBUG)
    SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD0 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_0)));
    SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD1 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1)));
    SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD2 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_2)));
    SA_DBG1(("mpiUnInitConfigTable: SCRATCH_PAD3 value = 0x%x\n", ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3)));
#endif
    smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "m7");
    return AGSA_RC_FAILURE;
  }

  smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "m7");
  return AGSA_RC_SUCCESS;
}

/*******************************************************************************/
/** \fn void mpiUpdateIBQueueCfgTable(agsaRoot_t *agRoot, spc_inboundQueueDescriptor_t *inQueueCfg,
 *                                    bit32 QueueTableOffset, bit8 pcibar)
 *  \brief Writing to the inbound queue of the
 *         Configuration Table
 *  \param agsaRoot Pointer to a data structure containing both application and LL layer context handles
 *  \param inQueueCfg Pointer to inbound configuration area
 *  \param QueueTableOffset Queue configuration table offset
 *  \param pcibar PCI BAR
 *
 * Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiUpdateIBQueueCfgTable(agsaRoot_t *agRoot, spc_inboundQueueDescriptor_t *inQueueCfg,
                                     bit32 QueueTableOffset, bit8 pcibar)
{
  smTraceFuncEnter(hpDBG_VERY_LOUD,"m5");
  smTrace(hpDBG_VERY_LOUD,"Ba",QueueTableOffset);
  /* TP:Ba QueueTableOffset */
  smTrace(hpDBG_VERY_LOUD,"Bb",pcibar);
  /* TP:Bb pcibar */

  /* Program one inbound-queue descriptor: properties/priority/element size,
   * queue base address (hi/lo) and consumer-index base address (hi/lo). */
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_PROPERITY_OFFSET), inQueueCfg->elementPriSizeCount);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_BASE_ADDR_HI_OFFSET), inQueueCfg->upperBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_BASE_ADDR_LO_OFFSET), inQueueCfg->lowerBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_CI_BASE_ADDR_HI_OFFSET), inQueueCfg->ciUpperBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + IB_CI_BASE_ADDR_LO_OFFSET), inQueueCfg->ciLowerBaseAddress);

  SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x elementPriSizeCount 0x%x\n",(bit32)(QueueTableOffset + IB_PROPERITY_OFFSET), inQueueCfg->elementPriSizeCount));
  SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x upperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_BASE_ADDR_HI_OFFSET), inQueueCfg->upperBaseAddress));
  SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x lowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_BASE_ADDR_LO_OFFSET), inQueueCfg->lowerBaseAddress));
  SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x ciUpperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_CI_BASE_ADDR_HI_OFFSET), inQueueCfg->ciUpperBaseAddress));
  SA_DBG3(("mpiUpdateIBQueueCfgTable: Offset 0x%08x ciLowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + IB_CI_BASE_ADDR_LO_OFFSET), inQueueCfg->ciLowerBaseAddress));

  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m5");
}

/*******************************************************************************/
/** \fn void mpiUpdateOBQueueCfgTable(agsaRoot_t *agRoot, spc_outboundQueueDescriptor_t *outQueueCfg,
 *                                    bit32 QueueTableOffset, bit8 pcibar)
 *  \brief Writing to the outbound queue of the Configuration Table
 *  \param agsaRoot Pointer to a data structure containing both application
 *                  and LL layer context handles
 *  \param outQueueCfg Pointer to outbound configuration area
 *  \param QueueTableOffset Queue configuration table offset
 *  \param pcibar PCI BAR
 *
 * Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiUpdateOBQueueCfgTable(agsaRoot_t *agRoot, spc_outboundQueueDescriptor_t *outQueueCfg,
                                     bit32 QueueTableOffset, bit8 pcibar)
{
  smTraceFuncEnter(hpDBG_VERY_LOUD,"m8");

  /* Program one outbound-queue descriptor: element size/count, queue base
   * address (hi/lo), producer-index base address (hi/lo) and interrupt
   * coalescing vector/count/delay. */
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_PROPERITY_OFFSET), outQueueCfg->elementSizeCount);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_BASE_ADDR_HI_OFFSET), outQueueCfg->upperBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_BASE_ADDR_LO_OFFSET), outQueueCfg->lowerBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_PI_BASE_ADDR_HI_OFFSET), outQueueCfg->piUpperBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_PI_BASE_ADDR_LO_OFFSET), outQueueCfg->piLowerBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(QueueTableOffset + OB_INTERRUPT_COALES_OFFSET), outQueueCfg->interruptVecCntDelay);

  SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x elementSizeCount 0x%x\n",(bit32)(QueueTableOffset + OB_PROPERITY_OFFSET), outQueueCfg->elementSizeCount));
  SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x upperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_BASE_ADDR_HI_OFFSET), outQueueCfg->upperBaseAddress));
  SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x lowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_BASE_ADDR_LO_OFFSET), outQueueCfg->lowerBaseAddress));
  SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x piUpperBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_PI_BASE_ADDR_HI_OFFSET), outQueueCfg->piUpperBaseAddress));
  SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x piLowerBaseAddress 0x%x\n",(bit32)(QueueTableOffset + OB_PI_BASE_ADDR_LO_OFFSET), outQueueCfg->piLowerBaseAddress));
  SA_DBG3(("mpiUpdateOBQueueCfgTable: Offset 0x%08x interruptVecCntDelay 0x%x\n",(bit32)(QueueTableOffset + OB_INTERRUPT_COALES_OFFSET), outQueueCfg->interruptVecCntDelay));

  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m8");
}

/*******************************************************************************/
/** \fn void mpiUpdateFatalErrorTable(agsaRoot_t *agRoot, bit32 FerrTableOffset,
 *                                    bit32 lowerBaseAddress, bit32 upperBaseAddress,
 *                                    bit32 length, bit8 pcibar)
 *  \brief Writing the fatal-error dump buffer description to the
 *         Configuration Table
 *  \param agsaRoot Pointer to a data structure containing both application
 *                  and LL layer context handles
 *  \param FerrTableOffset Fatal error table offset
 *  \param lowerBaseAddress Lower 32 bits of the dump buffer address
 *  \param upperBaseAddress Upper 32 bits of the dump buffer address
 *  \param length Dump buffer length
 *  \param pcibar PCI BAR
 *
 * Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiUpdateFatalErrorTable(agsaRoot_t *agRoot,
                                     bit32 FerrTableOffset,
                                     bit32 lowerBaseAddress,
                                     bit32 upperBaseAddress,
                                     bit32 length,
                                     bit8 pcibar)
{
  smTraceFuncEnter(hpDBG_VERY_LOUD,"2U");

  /* Describe the host dump buffer, then clear handshake and status so the
   * firmware starts from a known idle state. */
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LO_OFFSET), lowerBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HI_OFFSET), upperBaseAddress);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LENGTH), length);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HANDSHAKE), 0);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(FerrTableOffset + MPI_FATAL_EDUMP_TABLE_STATUS), 0);

  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LO_OFFSET, lowerBaseAddress));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HI_OFFSET,upperBaseAddress ));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_LENGTH 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_LENGTH, length));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_HANDSHAKE,0 ));
  SA_DBG3(("mpiUpdateFatalErrorTable: Offset 0x%08x MPI_FATAL_EDUMP_TABLE_STATUS 0x%x\n",FerrTableOffset + MPI_FATAL_EDUMP_TABLE_STATUS,0 ));

  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "2U");
}

/*******************************************************************************/
/** \fn bit32 mpiGetPCIBarIndex(agsaRoot_t *agRoot, pciBar)
 *  \brief Get PCI BAR Index from PCI BAR
 *  \param agsaRoot Pointer to a data structure containing both application and LL layer context handles
 *  \param pciBar - PCI BAR
 *
 * Maps a hardware BAR number (BAR0..BAR5, 64-bit BARs consume two slots)
 * to the driver's logical BAR index (PCIBAR0..PCIBAR3).
 *
 * Return:
 *         PCI BAR Index
 */
/*******************************************************************************/
GLOBAL bit32 mpiGetPCIBarIndex(agsaRoot_t *agRoot, bit32 pciBar)
{
  switch(pciBar)
  {
    case BAR0:
    case BAR1:
      pciBar = PCIBAR0;
      break;
    case BAR2:
    case BAR3:
      pciBar = PCIBAR1;
      break;
    case BAR4:
      pciBar = PCIBAR2;
      break;
    case BAR5:
      pciBar = PCIBAR3;
      break;
    default:
      /* unknown hardware BAR: fall back to logical BAR 0 */
      pciBar = PCIBAR0;
      break;
  }
  return pciBar;
}

/*******************************************************************************/
/** \fn void mpiReadGSTTable(agsaRoot_t *agRoot, spc_GSTableDescriptor_t *mpiGSTable)
 *  \brief Reading the General Status Table
 *
 *  \param agsaRoot Handles for this instance of SAS/SATA LLL
 *  \param mpiGSTable Pointer of General Status Table
 *
 * Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiReadGSTable(agsaRoot_t *agRoot, spc_GSTableDescriptor_t *mpiGSTable)
{
  bit32 CFGTableOffset, TableOffset;
  bit32 GSTableOffset;
  bit8  i, pcibar;

  smTraceFuncEnter(hpDBG_VERY_LOUD,"m9");

  /* get offset of the configuration table */
  TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  if(0xFFFFFFFF == TableOffset)
  {
    /* NOTE(review): this SA_ASSERT fires when its condition is TRUE, i.e. it
     * deliberately asserts on the dead-chip path in debug builds — confirm
     * this inverted usage is intended. */
    SA_ASSERT(0xFFFFFFFF == TableOffset, "Chip PCI dead");
    SA_DBG1(("mpiReadGSTable: Chip PCI dead TableOffset 0x%x\n", TableOffset));
    return;
  }
// SA_DBG1(("mpiReadGSTable: TableOffset 0x%x\n", TableOffset));
  CFGTableOffset = TableOffset & SCRATCH_PAD0_OFFSET_MASK;
  /* get PCI BAR */
  TableOffset = (TableOffset & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* convert the PCI BAR to logical bar number */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, TableOffset);

  /* read GST Table Offset from the configuration table */
  GSTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_GST_OFFSET);
// SA_DBG1(("mpiReadGSTable: GSTableOffset 0x%x\n",GSTableOffset ));
  GSTableOffset = CFGTableOffset + GSTableOffset;

  /* Snapshot the firmware-maintained General Status Table fields */
  mpiGSTable->GSTLenMPIS = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_GSTLEN_MPIS_OFFSET));
  mpiGSTable->IQFreezeState0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IQ_FREEZE_STATE0_OFFSET));
  mpiGSTable->IQFreezeState1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IQ_FREEZE_STATE1_OFFSET));
  mpiGSTable->MsguTcnt = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_MSGUTCNT_OFFSET));
  mpiGSTable->IopTcnt = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IOPTCNT_OFFSET));
  mpiGSTable->Iop1Tcnt = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_IOP1TCNT_OFFSET));

  SA_DBG4(("mpiReadGSTable: GSTLenMPIS 0x%x\n", mpiGSTable->GSTLenMPIS));
  SA_DBG4(("mpiReadGSTable: GSTLen 0x%x\n", (mpiGSTable->GSTLenMPIS & 0xfff8) >> SHIFT3));
  SA_DBG4(("mpiReadGSTable: IQFreezeState0 0x%x\n", mpiGSTable->IQFreezeState0));
  SA_DBG4(("mpiReadGSTable: IQFreezeState1 0x%x\n", mpiGSTable->IQFreezeState1));
  SA_DBG4(("mpiReadGSTable: MsguTcnt 0x%x\n", mpiGSTable->MsguTcnt));
  SA_DBG4(("mpiReadGSTable: IopTcnt 0x%x\n", mpiGSTable->IopTcnt));
  SA_DBG4(("mpiReadGSTable: Iop1Tcnt 0x%x\n", mpiGSTable->Iop1Tcnt));

  if(smIS_SPCV(agRoot))
  {
    /***** read Phy State from SAS Phy Attribute Table */
    TableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_PHY_ATTRIBUTE_OFFSET);
    TableOffset &= 0x00FFFFFF;
    TableOffset = TableOffset + CFGTableOffset;
    for (i = 0; i < 8; i++)
    {
      mpiGSTable->PhyState[i] = ossaHwRegReadExt(agRoot, pcibar, (bit32)(TableOffset + i * sizeof(phyAttrb_t)));
      SA_DBG4(("mpiReadGSTable: PhyState[0x%x] 0x%x\n", i, mpiGSTable->PhyState[i]));
    }
  }
  else
  {
    /* SPC keeps per-phy state directly in the GST */
    for (i = 0; i < 8; i++)
    {
      mpiGSTable->PhyState[i] = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_PHYSTATE_OFFSET + i * 4));
      SA_DBG4(("mpiReadGSTable: PhyState[0x%x] 0x%x\n", i, mpiGSTable->PhyState[i]));
    }
  }

  mpiGSTable->GPIOpins = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_GPIO_PINS_OFFSET));
  SA_DBG4(("mpiReadGSTable: GPIOpins 0x%x\n", mpiGSTable->GPIOpins));

  for (i = 0; i < 8; i++)
  {
    /* NOTE(review): every iteration reads the same DWORD — the offset is not
     * advanced by i, so all 8 recoverErrInfo slots get identical data; looks
     * like a missing "+ i * 4". Confirm against the MPI spec before changing. */
    mpiGSTable->recoverErrInfo[i] = ossaHwRegReadExt(agRoot, pcibar, (bit32)(GSTableOffset + GST_RERRINFO_OFFSET));
    SA_DBG4(("mpiReadGSTable: recoverErrInfo[0x%x] 0x%x\n", i, mpiGSTable->recoverErrInfo[i]));
  }

  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m9");
}

/*******************************************************************************/
/** \fn void siInitResources(agsaRoot_t *agRoot)
 *  Initialization of LL resources
 *
 *  \param agsaRoot Handles for this instance of SAS/SATA LLL
 *  \param memoryAllocated Point to the data structure that holds the different
 *                         chunks of memory that are required
 *
 * Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void siInitResources(agsaRoot_t *agRoot, agsaMemoryRequirement_t
*memoryAllocated, agsaHwConfig_t *hwConfig, agsaSwConfig_t *swConfig, bit32 usecsPerTick) { agsaLLRoot_t *saRoot; agsaDeviceDesc_t *pDeviceDesc; agsaIORequestDesc_t *pRequestDesc; agsaTimerDesc_t *pTimerDesc; agsaPort_t *pPort; agsaPortMap_t *pPortMap; agsaDeviceMap_t *pDeviceMap; agsaIOMap_t *pIOMap; bit32 maxNumIODevices; bit32 i, j; mpiICQueue_t *circularIQ; mpiOCQueue_t *circularOQ; if (agNULL == agRoot) { return; } /* Get the saRoot memory address */ saRoot = (agsaLLRoot_t *) (memoryAllocated->agMemory[LLROOT_MEM_INDEX].virtPtr); agRoot->sdkData = (void *) saRoot; /* Setup Device link */ /* Save the information of allocated device Link memory */ saRoot->deviceLinkMem = memoryAllocated->agMemory[DEVICELINK_MEM_INDEX]; si_memset(saRoot->deviceLinkMem.virtPtr, 0, saRoot->deviceLinkMem.totalLength); SA_DBG2(("siInitResources: [%d] saRoot->deviceLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n" , DEVICELINK_MEM_INDEX, saRoot->deviceLinkMem.virtPtr, saRoot->deviceLinkMem.phyAddrLower, saRoot->deviceLinkMem.numElements, saRoot->deviceLinkMem.totalLength, saRoot->deviceLinkMem.type)); maxNumIODevices = swConfig->numDevHandles; SA_DBG2(("siInitResources: maxNumIODevices=%d, swConfig->numDevHandles=%d \n", maxNumIODevices, swConfig->numDevHandles)); /* Setup free IO Devices link list */ saLlistInitialize(&(saRoot->freeDevicesList)); for ( i = 0; i < (bit32) maxNumIODevices; i ++ ) { /* get the pointer to the device descriptor */ pDeviceDesc = (agsaDeviceDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->deviceLinkMem), i); /* Initialize device descriptor */ saLlinkInitialize(&(pDeviceDesc->linkNode)); pDeviceDesc->initiatorDevHandle.osData = agNULL; pDeviceDesc->initiatorDevHandle.sdkData = agNULL; pDeviceDesc->targetDevHandle.osData = agNULL; pDeviceDesc->targetDevHandle.sdkData = agNULL; pDeviceDesc->deviceType = SAS_SATA_UNKNOWN_DEVICE; pDeviceDesc->pPort = agNULL; pDeviceDesc->DeviceMapIndex = 0; saLlistInitialize(&(pDeviceDesc->pendingIORequests)); /* Add the 
device descriptor to the free IO device link list */ saLlistAdd(&(saRoot->freeDevicesList), &(pDeviceDesc->linkNode)); } /* Setup IO Request link */ /* Save the information of allocated IO Request Link memory */ saRoot->IORequestMem = memoryAllocated->agMemory[IOREQLINK_MEM_INDEX]; si_memset(saRoot->IORequestMem.virtPtr, 0, saRoot->IORequestMem.totalLength); SA_DBG2(("siInitResources: [%d] saRoot->IORequestMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", IOREQLINK_MEM_INDEX, saRoot->IORequestMem.virtPtr, saRoot->IORequestMem.phyAddrLower, saRoot->IORequestMem.numElements, saRoot->IORequestMem.totalLength, saRoot->IORequestMem.type)); /* Setup free IO Request link list */ saLlistIOInitialize(&(saRoot->freeIORequests)); saLlistIOInitialize(&(saRoot->freeReservedRequests)); for ( i = 0; i < swConfig->maxActiveIOs; i ++ ) { /* get the pointer to the request descriptor */ pRequestDesc = (agsaIORequestDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->IORequestMem), i); /* Initialize request descriptor */ saLlinkIOInitialize(&(pRequestDesc->linkNode)); pRequestDesc->valid = agFALSE; pRequestDesc->requestType = AGSA_REQ_TYPE_UNKNOWN; pRequestDesc->pIORequestContext = agNULL; pRequestDesc->HTag = i; pRequestDesc->pDevice = agNULL; pRequestDesc->pPort = agNULL; /* Add the request descriptor to the free IO Request link list */ /* Add the request descriptor to the free Reserved Request link list */ /* SMP request must get service so reserve one request when first SMP completes */ if(saLlistIOGetCount(&(saRoot->freeReservedRequests)) < SA_RESERVED_REQUEST_COUNT) { saLlistIOAdd(&(saRoot->freeReservedRequests), &(pRequestDesc->linkNode)); } else { saLlistIOAdd(&(saRoot->freeIORequests), &(pRequestDesc->linkNode)); } } /* Setup timer link */ /* Save the information of allocated timer Link memory */ saRoot->timerLinkMem = memoryAllocated->agMemory[TIMERLINK_MEM_INDEX]; si_memset(saRoot->timerLinkMem.virtPtr, 0, saRoot->timerLinkMem.totalLength); SA_DBG2(("siInitResources: [%d] 
saRoot->timerLinkMem VirtPtr=%p PhysicalLo=%x Count=%x Total=%x type %x\n", TIMERLINK_MEM_INDEX, saRoot->timerLinkMem.virtPtr, saRoot->timerLinkMem.phyAddrLower, saRoot->timerLinkMem.numElements, saRoot->timerLinkMem.totalLength, saRoot->timerLinkMem.type)); /* Setup free timer link list */ saLlistInitialize(&(saRoot->freeTimers)); for ( i = 0; i < NUM_TIMERS; i ++ ) { /* get the pointer to the timer descriptor */ pTimerDesc = (agsaTimerDesc_t *) AGSAMEM_ELEMENT_READ(&(saRoot->timerLinkMem), i); /* Initialize timer descriptor */ saLlinkInitialize(&(pTimerDesc->linkNode)); pTimerDesc->valid = agFALSE; pTimerDesc->timeoutTick = 0; pTimerDesc->pfnTimeout = agNULL; pTimerDesc->Event = 0; pTimerDesc->pParm = agNULL; /* Add the timer descriptor to the free timer link list */ saLlistAdd(&(saRoot->freeTimers), &(pTimerDesc->linkNode)); } /* Setup valid timer link list */ saLlistInitialize(&(saRoot->validTimers)); /* Setup Phys */ /* Setup PhyCount */ saRoot->phyCount = (bit8) hwConfig->phyCount; /* Init Phy data structure */ for ( i = 0; i < saRoot->phyCount; i ++ ) { saRoot->phys[i].pPort = agNULL; saRoot->phys[i].phyId = (bit8) i; /* setup phy status is PHY_STOPPED */ PHY_STATUS_SET(&(saRoot->phys[i]), PHY_STOPPED); } /* Setup Ports */ /* Setup PortCount */ saRoot->portCount = saRoot->phyCount; /* Setup free port link list */ saLlistInitialize(&(saRoot->freePorts)); for ( i = 0; i < saRoot->portCount; i ++ ) { /* get the pointer to the port */ pPort = &(saRoot->ports[i]); /* Initialize port */ saLlinkInitialize(&(pPort->linkNode)); pPort->portContext.osData = agNULL; pPort->portContext.sdkData = pPort; pPort->portId = 0; pPort->portIdx = (bit8) i; pPort->status = PORT_NORMAL; for ( j = 0; j < saRoot->phyCount; j ++ ) { pPort->phyMap[j] = agFALSE; } saLlistInitialize(&(pPort->listSASATADevices)); /* Add the port to the free port link list */ saLlistAdd(&(saRoot->freePorts), &(pPort->linkNode)); } /* Setup valid port link list */ saLlistInitialize(&(saRoot->validPorts)); 
/* Init sysIntsActive */ saRoot->sysIntsActive = agFALSE; /* setup timer tick granunarity */ saRoot->usecsPerTick = usecsPerTick; /* initialize LL timer tick */ saRoot->timeTick = 0; /* initialize device (de)registration callback fns */ saRoot->DeviceRegistrationCB = agNULL; saRoot->DeviceDeregistrationCB = agNULL; /* Initialize the PortMap for port context */ for ( i = 0; i < saRoot->portCount; i ++ ) { pPortMap = &(saRoot->PortMap[i]); pPortMap->PortContext = agNULL; pPortMap->PortID = PORT_MARK_OFF; pPortMap->PortStatus = PORT_NORMAL; saRoot->autoDeregDeviceflag[i] = 0; } /* Initialize the DeviceMap for device handle */ for ( i = 0; i < MAX_IO_DEVICE_ENTRIES; i ++ ) { pDeviceMap = &(saRoot->DeviceMap[i]); pDeviceMap->DeviceHandle = agNULL; pDeviceMap->DeviceIdFromFW = i; } /* Initialize the IOMap for IOrequest */ for ( i = 0; i < MAX_ACTIVE_IO_REQUESTS; i ++ ) { pIOMap = &(saRoot->IOMap[i]); pIOMap->IORequest = agNULL; pIOMap->Tag = MARK_OFF; } /* clean the inbound queues */ for (i = 0; i < saRoot->QueueConfig.numInboundQueues; i ++) { if(0 != saRoot->inboundQueue[i].numElements) { circularIQ = &saRoot->inboundQueue[i]; si_memset(circularIQ->memoryRegion.virtPtr, 0, circularIQ->memoryRegion.totalLength); si_memset(saRoot->inboundQueue[i].ciPointer, 0, sizeof(bit32)); } } /* clean the outbound queues */ for (i = 0; i < saRoot->QueueConfig.numOutboundQueues; i ++) { if(0 != saRoot->outboundQueue[i].numElements) { circularOQ = &saRoot->outboundQueue[i]; si_memset(circularOQ->memoryRegion.virtPtr, 0, circularOQ->memoryRegion.totalLength); si_memset(saRoot->outboundQueue[i].piPointer, 0, sizeof(bit32)); circularOQ->producerIdx = 0; circularOQ->consumerIdx = 0; SA_DBG3(("siInitResource: Q %d Clean PI 0x%03x CI 0x%03x\n", i,circularOQ->producerIdx, circularOQ->consumerIdx)); } } return; } /*******************************************************************************/ /** \fn void mpiReadCALTable(agsaRoot_t *agRoot, * spc_SPASTable_t *mpiCALTable, bit32 index) * 
\brief Reading the Phy Analog Setup Register Table
 *  \param agsaRoot    Handles for this instance of SAS/SATA LLL
 *  \param mpiCALTable Pointer of Phy Calibration Table
 *
 *  Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiReadCALTable(agsaRoot_t *agRoot,
                            spc_SPASTable_t *mpiCALTable,
                            bit32 index)
{
  bit32 CFGTableOffset, TableOffset;
  bit32 CALTableOffset;
  bit8  pcibar;

  /* get offset of the configuration table */
  TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  CFGTableOffset = TableOffset & SCRATCH_PAD0_OFFSET_MASK;

  /* get PCI BAR */
  TableOffset = (TableOffset & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* convert the PCI BAR to logical bar number */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, TableOffset);

  /* read Calibration Table Offset from the configuration table */
  CALTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_ANALOG_SETUP_OFFSET);
  if(smIS_SPCV(agRoot))
  {
    /* SPCv: only the low 24 bits carry the table offset */
    CALTableOffset &= 0x00FFFFFF;
  }
  /* each table entry is ANALOG_SETUP_ENTRY_SIZE dwords; index selects one entry */
  CALTableOffset = CFGTableOffset + CALTableOffset + (index * ANALOG_SETUP_ENTRY_SIZE * 4);

  mpiCALTable->spaReg0 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG1_OFFSET));
  mpiCALTable->spaReg1 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG2_OFFSET));
  mpiCALTable->spaReg2 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG3_OFFSET));
  mpiCALTable->spaReg3 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_CFG_OFFSET));
  mpiCALTable->spaReg4 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG1_OFFSET));
  mpiCALTable->spaReg5 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG2_OFFSET));
  mpiCALTable->spaReg6 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG1_OFFSET));
  mpiCALTable->spaReg7 = ossaHwRegReadExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG2_OFFSET));

  SA_DBG3(("mpiReadCALTable: spaReg0 0x%x\n", mpiCALTable->spaReg0));
  SA_DBG3(("mpiReadCALTable: spaReg1 0x%x\n", mpiCALTable->spaReg1));
  SA_DBG3(("mpiReadCALTable: spaReg2 0x%x\n", mpiCALTable->spaReg2));
  SA_DBG3(("mpiReadCALTable: spaReg3 0x%x\n", mpiCALTable->spaReg3));
  SA_DBG3(("mpiReadCALTable: spaReg4 0x%x\n", mpiCALTable->spaReg4));
  SA_DBG3(("mpiReadCALTable: spaReg5 0x%x\n", mpiCALTable->spaReg5));
  SA_DBG3(("mpiReadCALTable: spaReg6 0x%x\n", mpiCALTable->spaReg6));
  SA_DBG3(("mpiReadCALTable: spaReg7 0x%x\n", mpiCALTable->spaReg7));
}

/*******************************************************************************/
/** \fn void mpiWriteCALTable(agsaRoot_t *agRoot,
 *              spc_SPASTable_t *mpiCALTable, index)
 *  \brief Writing the Phy Analog Setup Register Table
 *  \param agsaRoot    Handles for this instance of SAS/SATA LLL
 *  \param mpiCALTable Pointer of Phy Calibration Table
 *
 *  Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiWriteCALTable(agsaRoot_t *agRoot,
                             spc_SPASTable_t *mpiCALTable,
                             bit32 index)
{
  bit32 CFGTableOffset, TableOffset;
  bit32 CALTableOffset;
  bit8  pcibar;
  smTraceFuncEnter(hpDBG_VERY_LOUD,"m6");

  /* get offset of the configuration table */
  TableOffset = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0);
  CFGTableOffset = TableOffset & SCRATCH_PAD0_OFFSET_MASK;

  /* get PCI BAR */
  TableOffset = (TableOffset & SCRATCH_PAD0_BAR_MASK) >> SHIFT26;
  /* convert the PCI BAR to logical bar number */
  pcibar = (bit8)mpiGetPCIBarIndex(agRoot, TableOffset);

  /* read Calibration Table Offset from the configuration table */
  CALTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_ANALOG_SETUP_OFFSET);
  if(smIS_SPCV(agRoot))
  {
    /* SPCv: only the low 24 bits carry the table offset */
    CALTableOffset &= 0x00FFFFFF;
  }
  CALTableOffset = CFGTableOffset + CALTableOffset + (index * ANALOG_SETUP_ENTRY_SIZE * 4);

  /* mirror of mpiReadCALTable: write all eight analog setup registers */
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG1_OFFSET), mpiCALTable->spaReg0);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG2_OFFSET), mpiCALTable->spaReg1);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_PORT_CFG3_OFFSET), mpiCALTable->spaReg2);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + TX_CFG_OFFSET), mpiCALTable->spaReg3);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG1_OFFSET), mpiCALTable->spaReg4);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_PORT_CFG2_OFFSET), mpiCALTable->spaReg5);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG1_OFFSET), mpiCALTable->spaReg6);
  ossaHwRegWriteExt(agRoot, pcibar, (bit32)(CALTableOffset + RV_CFG2_OFFSET), mpiCALTable->spaReg7);

  SA_DBG4(("mpiWriteCALTable: Offset 0x%08x spaReg0 0x%x 0x%x 0x%x 0x%x\n",(bit32)(CALTableOffset + TX_PORT_CFG1_OFFSET), mpiCALTable->spaReg0, mpiCALTable->spaReg1, mpiCALTable->spaReg2, mpiCALTable->spaReg3));
  SA_DBG4(("mpiWriteCALTable: Offset 0x%08x spaReg4 0x%x 0x%x 0x%x 0x%x\n",(bit32)(CALTableOffset + RV_PORT_CFG1_OFFSET), mpiCALTable->spaReg4, mpiCALTable->spaReg5, mpiCALTable->spaReg6, mpiCALTable->spaReg7));
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "m6");
}

/*******************************************************************************/
/** \fn void mpiWriteCALAll(agsaRoot_t *agRoot,
 *                          agsaPhyAnalogSetupTable_t *mpiCALTable)
 *  \brief Writing the Phy Analog Setup Register Table
 *  \param agsaRoot    Handles for this instance of SAS/SATA LLL
 *  \param mpiCALTable Pointer of Phy Calibration Table
 *
 *  Return:
 *         None
 */
/*******************************************************************************/
GLOBAL void mpiWriteCALAll(agsaRoot_t *agRoot,
                           agsaPhyAnalogSetupTable_t *mpiCALTable)
{
  bit8 i;
  smTraceFuncEnter(hpDBG_VERY_LOUD,"mz");

  /* SPCv programs the analog table via mpiWrAnalogSetupTable instead */
  if(smIS_SPCV(agRoot))
  {
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "mz");
    return;
  }

  for (i = 0; i < MAX_INDEX; i++)
  {
    mpiWriteCALTable(agRoot, (spc_SPASTable_t *)&mpiCALTable->phyAnalogSetupRegisters[i], i);
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "mz");
}

GLOBAL void mpiWrAnalogSetupTable(agsaRoot_t *agRoot, mpiConfig_t
*config ) { bit32 AnalogTableBase,CFGTableOffset, value,phy; bit32 AnalogtableSize; bit8 pcibar; value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); pcibar = (bit8)mpiGetPCIBarIndex(agRoot, value); CFGTableOffset = value & SCRATCH_PAD0_OFFSET_MASK; AnalogtableSize = AnalogTableBase = ossaHwRegReadExt(agRoot,pcibar , (bit32)CFGTableOffset + MAIN_ANALOG_SETUP_OFFSET); AnalogtableSize &= 0xFF000000; AnalogtableSize >>= SHIFT24; AnalogTableBase &= 0x00FFFFFF; AnalogTableBase = CFGTableOffset + AnalogTableBase; // config->phyAnalogConfig.phyAnalogSetupRegisters[0].spaRegister0 = 0; SA_DBG1(("mpiWrAnalogSetupTable:Analogtable Base Offset %08X pcibar %d\n",AnalogTableBase, pcibar )); SA_DBG1(("mpiWrAnalogSetupTable:%d %d\n",(int)sizeof(agsaPhyAnalogSetupRegisters_t), AnalogtableSize)); for(phy = 0; phy < 10; phy++) /* upto 10 phys See PM*/ { ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 0 ),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister0 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 4 ),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister1 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 8 ),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister2 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 12),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister3 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 16),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister4 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 20),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister5 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 24),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister6 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * 
phy)+ 28),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister7 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 32),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister8 ); ossaHwRegWriteExt(agRoot, pcibar,(AnalogTableBase + ( AnalogtableSize * phy)+ 36),config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister9 ); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister0 0x%x 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) + 0,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister0 ,ossaHwRegReadExt(agRoot, pcibar,AnalogTableBase + ( AnalogtableSize * phy)+ 0 ))); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister1 0x%x 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) + 4,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister1 ,ossaHwRegReadExt(agRoot, pcibar,AnalogTableBase + ( AnalogtableSize * phy)+ 4 ))); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister2 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) + 8,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister2 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister3 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +12,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister3 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister4 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +16,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister4 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister5 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +20,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister5 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister6 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +24,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister6 )); 
SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister7 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +28,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister7 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister8 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +32,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister8 )); SA_DBG4(("mpiWrAnalogSetupTable:phy %d Offset 0x%08x spaRegister9 0x%x\n",phy, (bit32) AnalogTableBase+ (AnalogtableSize * phy) +36,config->phyAnalogConfig.phyAnalogSetupRegisters[phy].spaRegister9 )); } } GLOBAL void mpiWrIntVecTable(agsaRoot_t *agRoot, mpiConfig_t* config ) { bit32 CFGTableOffset, value; bit32 INTVTableOffset; bit32 ValuetoWrite; bit8 pcibar, i,obq; /* get offset of the configuration table */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); CFGTableOffset = value & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ value = (value & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, value); /* read Interrupt Table Offset from the main configuration table */ INTVTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_INT_VEC_TABLE_OFFSET); INTVTableOffset &= 0x00FFFFFF; INTVTableOffset = CFGTableOffset + INTVTableOffset; SA_DBG1(("mpiWrIntVecTable: Base Offset %08X\n",(bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO ) )); for (i = 0; i < MAX_NUM_VECTOR; i ++) { bit32 found=0; for (obq = 0; obq < MAX_NUM_VECTOR; obq++) { /* find OBQ for vector i */ if( config->outboundQueues[obq].interruptVector == i ) { found=1; break; } } if(!found ) { continue; } ValuetoWrite = (( config->outboundQueues[obq].interruptDelay << SHIFT15) | config->outboundQueues[obq].interruptThreshold ); ossaHwRegWriteExt(agRoot, pcibar, (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t)), ValuetoWrite ); SA_DBG3(("mpiWrIntVecTable: Q %d interruptDelay 0x%X 
interruptThreshold 0x%X \n",i, config->outboundQueues[i].interruptDelay, config->outboundQueues[i].interruptThreshold )); SA_DBG3(("mpiWrIntVecTable: %d INT_VT_Coal_CNT_TO Bar %d Offset %3X Writing 0x%08x\n",i, pcibar, (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t)), ValuetoWrite)); } for (i = 0; i < MAX_NUM_VECTOR; i++) { /* read interrupt colescing control and timer */ value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t))); SA_DBG4(("mpiWrIntVecTable: Offset 0x%08x Interrupt Colescing iccict[%02d] 0x%x\n", (bit32)(INTVTableOffset + INT_VT_Coal_CNT_TO + i * sizeof(InterruptVT_t)), i, value)); } } GLOBAL void mpiWrPhyAttrbTable(agsaRoot_t *agRoot, sasPhyAttribute_t *phyAttrib) { bit32 CFGTableOffset, value; bit32 PHYTableOffset; bit8 pcibar, i; /* get offset of the configuration table */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0); CFGTableOffset = value & SCRATCH_PAD0_OFFSET_MASK; /* get PCI BAR */ value = (value & SCRATCH_PAD0_BAR_MASK) >> SHIFT26; /* convert the PCI BAR to logical bar number */ pcibar = (bit8)mpiGetPCIBarIndex(agRoot, value); /* read Phy Attribute Table Offset from the configuration table */ PHYTableOffset = ossaHwRegReadExt(agRoot, pcibar, (bit32)CFGTableOffset + MAIN_PHY_ATTRIBUTE_OFFSET); PHYTableOffset &=0x00FFFFFF; PHYTableOffset = CFGTableOffset + PHYTableOffset + PHY_EVENT_OQ; SA_DBG1(("mpiWrPhyAttrbTable: PHYTableOffset 0x%08x\n", PHYTableOffset)); /* write OQ event per phy */ for (i = 0; i < MAX_VALID_PHYS; i ++) { ossaHwRegWriteExt(agRoot, pcibar, (bit32)(PHYTableOffset + i * sizeof(phyAttrb_t)), phyAttrib->phyAttribute[i].phyEventOQ); SA_DBG3(("mpiWrPhyAttrbTable:%d Offset 0x%08x phyAttribute 0x%x\n",i,(bit32)(PHYTableOffset + i * sizeof(phyAttrb_t)), phyAttrib->phyAttribute[i].phyEventOQ )); } for (i = 0; i < MAX_VALID_PHYS; i ++) { value = ossaHwRegReadExt(agRoot, pcibar, (bit32)(PHYTableOffset + i * 
sizeof(phyAttrb_t))); SA_DBG1(("mpiWrPhyAttrbTable: OQ Event per phy[%x] 0x%x\n", i, value)); } } #ifdef TEST /******************************************************************/ /*******************************************************************************/ /** \fn mpiFreezeInboundQueue(agsaRoot_t *agRoot) * \brief Freeze the inbound queue * * \param agRoot Handles for this instance of SAS/SATA hardware * \param bitMapQueueNum0 bit map for inbound queue number 0 - 31 to freeze * \param bitMapQueueNum1 bit map for inbound queue number 32 - 63 to freeze * * Return: * AGSA_RC_SUCCESS if Un-initialize the configuration table sucessful * AGSA_RC_FAILURE if Un-initialize the configuration table failed */ /*******************************************************************************/ GLOBAL bit32 mpiFreezeInboundQueue(agsaRoot_t *agRoot, bit32 bitMapQueueNum0, bit32 bitMapQueueNum1) { bit32 value, togglevalue; bit32 max_wait_time; bit32 max_wait_count; SA_DBG2(("Entering function:mpiFreezeInboundQueue\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); togglevalue = 0; if (bitMapQueueNum0) { /* update the inbound queue number to HOST_SCRATCH_PAD1 register for queue 0 to 31 */ SA_DBG1(("mpiFreezeInboundQueue: SCRATCH_PAD0 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_0))); SA_DBG1(("mpiFreezeInboundQueue: SCRATCH_PAD3 value = 0x%x\n", siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_3,MSGU_SCRATCH_PAD_3))); value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_0,MSGU_SCRATCH_PAD_1); value |= bitMapQueueNum0; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_1, MSGU_HOST_SCRATCH_PAD_1, value); } if (bitMapQueueNum1) { /* update the inbound queue number to HOST_SCRATCH_PAD2 register for queue 32 to 63 */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); value |= bitMapQueueNum1; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_2, MSGU_HOST_SCRATCH_PAD_2, value); } /* Write bit 2 to Inbound 
DoorBell Register */ siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, IBDB_IBQ_FREEZE); /* wait until Inbound DoorBell Clear Register toggled */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); /* Read Inbound DoorBell Register - for RevB */ // value = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_IBDB_SET); value = MSGU_READ_IDR; value &= IBDB_IBQ_FREEZE; } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("mpiFreezeInboundQueue: IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue)); return AGSA_RC_FAILURE; } return AGSA_RC_SUCCESS; } /******************************************************************************/ /** \fn mpiUnFreezeInboundQueue(agsaRoot_t *agRoot) * \brief Freeze the inbound queue * * \param agRoot Handles for this instance of SAS/SATA hardware * \param bitMapQueueNum0 bit map for inbound queue number 0 - 31 to freeze * \param bitMapQueueNum1 bit map for inbound queue number 32 - 63 to freeze * * Return: * AGSA_RC_SUCCESS if Un-initialize the configuration table sucessful * AGSA_RC_FAILURE if Un-initialize the configuration table failed */ /******************************************************************************/ GLOBAL bit32 mpiUnFreezeInboundQueue(agsaRoot_t *agRoot, bit32 bitMapQueueNum0, bit32 bitMapQueueNum1) { bit32 value, togglevalue; bit32 max_wait_time; bit32 max_wait_count; SA_DBG2(("Entering function:mpiUnFreezeInboundQueue\n")); SA_ASSERT(NULL != agRoot, "agRoot argument cannot be null"); togglevalue = 0; if (bitMapQueueNum0) { /* update the inbound queue number to HOST_SCRATCH_PAD1 register - for queue 0 to 31 */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_1,MSGU_SCRATCH_PAD_1); value |= bitMapQueueNum0; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_1, MSGU_HOST_SCRATCH_PAD_1, value); } if (bitMapQueueNum1) { /* update the inbound queue number to 
HOST_SCRATCH_PAD2 register - for queue 32 to 63 */ value = siHalRegReadExt(agRoot, GEN_MSGU_SCRATCH_PAD_2,MSGU_SCRATCH_PAD_2); value |= bitMapQueueNum1; siHalRegWriteExt(agRoot, GEN_MSGU_HOST_SCRATCH_PAD_2, MSGU_HOST_SCRATCH_PAD_2, value); } /* Write bit 2 to Inbound DoorBell Register */ siHalRegWriteExt(agRoot, GEN_MSGU_IBDB_SET, MSGU_IBDB_SET, IBDB_IBQ_UNFREEZE); /* wait until Inbound DoorBell Clear Register toggled */ max_wait_time = WAIT_SECONDS(gWait_2); /* 2 sec */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); /* Read Inbound DoorBell Register - for RevB */ value = MSGU_READ_IDR; value &= IBDB_IBQ_UNFREEZE; } while ((value != togglevalue) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("mpiUnFreezeInboundQueue: IBDB value/toggle = 0x%x 0x%x\n", value, togglevalue)); return AGSA_RC_FAILURE; } return AGSA_RC_SUCCESS; } #endif /* TEST ****************************************************************/ GLOBAL bit32 si_check_V_HDA(agsaRoot_t *agRoot) { bit32 ret = AGSA_RC_SUCCESS; bit32 hda_status = 0; hda_status = (ossaHwRegReadExt(agRoot, PCIBAR0, SPC_V_HDA_RESPONSE_OFFSET+28)); SA_DBG1(("si_check_V_HDA: hda_status 0x%08X\n",hda_status )); if((hda_status & SPC_V_HDAR_RSPCODE_MASK) == SPC_V_HDAR_IDLE) { /* HDA mode */ SA_DBG1(("si_check_V_HDA: HDA mode, value = 0x%x\n", hda_status)); ret = AGSA_RC_HDA_NO_FW_RUNNING; } return(ret); } GLOBAL bit32 si_check_V_Ready(agsaRoot_t *agRoot) { bit32 ret = AGSA_RC_SUCCESS; bit32 SCRATCH_PAD1; bit32 max_wait_time; bit32 max_wait_count; /* ILA */ max_wait_time = (200 * 1000); /* wait 200 milliseconds */ max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT); do { ossaStallThread(agRoot, WAIT_INCREMENT); SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1); } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_ILA_MASK) != SCRATCH_PAD1_V_ILA_MASK) && (max_wait_count -= WAIT_INCREMENT)); if (!max_wait_count) { SA_DBG1(("si_check_V_Ready: 
SCRATCH_PAD1_V_ILA_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_ILA_MASK, SCRATCH_PAD1));
    return( AGSA_RC_FAILURE);
  }

  /* RAAE */
  max_wait_time = (200 * 1000); /* wait 200 milliseconds */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1);
  } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_RAAE_MASK) != SCRATCH_PAD1_V_RAAE_MASK) && (max_wait_count -= WAIT_INCREMENT));

  if (!max_wait_count)
  {
    SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_RAAE_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_RAAE_MASK, SCRATCH_PAD1));
    return( AGSA_RC_FAILURE);
  }

  /* IOP0 */
  max_wait_time = (200 * 1000); /* wait 200 milliseconds */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1);
  } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP0_MASK) != SCRATCH_PAD1_V_IOP0_MASK) && (max_wait_count -= WAIT_INCREMENT));

  if (!max_wait_count)
  {
    SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_IOP0_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_IOP0_MASK ,SCRATCH_PAD1));
    return( AGSA_RC_FAILURE);
  }

  /* IOP1 */
  max_wait_time = (200 * 1000); /* wait 200 milliseconds */
  max_wait_count = MAKE_MODULO(max_wait_time,WAIT_INCREMENT);
  do
  {
    ossaStallThread(agRoot, WAIT_INCREMENT);
    SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1);
  } while (((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP1_MASK) != SCRATCH_PAD1_V_IOP1_MASK) && (max_wait_count -= WAIT_INCREMENT));

  if (!max_wait_count)
  {
    SA_DBG1(("si_check_V_Ready: SCRATCH_PAD1_V_IOP1_MASK (0x%x) not set SCRATCH_PAD1 = 0x%x\n",SCRATCH_PAD1_V_IOP1_MASK, SCRATCH_PAD1));
    /* IOP1 not coming up is deliberately non-fatal (failure return left
     * disabled) -- presumably for single-IOP parts; confirm before enabling. */
//    return( AGSA_RC_FAILURE);
  }

  return(ret);
}

/* Dump the four MSGU scratch-pad registers and decode the firmware-state
 * bits of SCRATCH_PAD_1 to the debug log.  Always returns 0. */
GLOBAL bit32 siScratchDump(agsaRoot_t *agRoot)
{
  bit32 SCRATCH_PAD1;
  bit32 ret =0;
#ifdef SALLSDK_DEBUG
  /* PAD0/2/3 are only read when debug logging is compiled in */
  bit32 SCRATCH_PAD2;
  bit32 SCRATCH_PAD3;
  bit32 SCRATCH_PAD0;

  SCRATCH_PAD0 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_0);
  SCRATCH_PAD2 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_2);
  SCRATCH_PAD3 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_3);
#endif /* SALLSDK_DEBUG */
  SCRATCH_PAD1 = ossaHwRegReadExt(agRoot, PCIBAR0, MSGU_SCRATCH_PAD_1);
  SA_DBG1(("siScratchDump: SCRATCH_PAD 0 0x%08x 1 0x%08x 2 0x%08x 3 0x%08x\n",SCRATCH_PAD0,SCRATCH_PAD1,SCRATCH_PAD2,SCRATCH_PAD3 ));
  if((SCRATCH_PAD1 & SCRATCH_PAD1_V_RESERVED) == SCRATCH_PAD1_V_RESERVED )
  {
    SA_DBG1(("siScratchDump: SCRATCH_PAD1 SCRATCH_PAD1_V_RESERVED 0x%08x\n", SCRATCH_PAD1_V_RESERVED));
  }
  else
  {
    if((SCRATCH_PAD1 & SCRATCH_PAD1_V_RAAE_MASK) ==  SCRATCH_PAD1_V_RAAE_MASK )
    {
      SA_DBG1(("siScratchDump: SCRATCH_PAD1 valid 0x%08x\n",SCRATCH_PAD0 ));
      SA_DBG1(("siScratchDump: RAAE ready 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_RAAE_MASK));
    }
    if((SCRATCH_PAD1 & SCRATCH_PAD1_V_ILA_MASK) == SCRATCH_PAD1_V_ILA_MASK)
    {
      SA_DBG1(("siScratchDump: ILA  ready 0x%08x\n", SCRATCH_PAD1 & SCRATCH_PAD1_V_ILA_MASK));
    }
    if(SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK)
    {
      SA_DBG1(("siScratchDump: BOOTSTATE not success 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK));
    }
    if((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP0_MASK) == SCRATCH_PAD1_V_IOP0_MASK)
    {
      SA_DBG1(("siScratchDump: IOP0 ready 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP0_MASK));
    }
    if((SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP1_MASK) == SCRATCH_PAD1_V_IOP1_MASK)
    {
      SA_DBG1(("siScratchDump: IOP1 ready 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_IOP1_MASK ));
    }
    if((SCRATCH_PAD1 & SCRATCH_PAD1_V_READY) == SCRATCH_PAD1_V_READY)
    {
      SA_DBG1(("siScratchDump: SCRATCH_PAD1_V_READY 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_READY ));
    }
    if((SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK) == SCRATCH_PAD1_V_BOOTSTATE_MASK)
    {
      SA_DBG1(("siScratchDump: SCRATCH_PAD1_V_BOOTSTATE_MASK 0x%08x\n",SCRATCH_PAD1 & SCRATCH_PAD1_V_BOOTSTATE_MASK ));
    }
  }
  return(ret);
}

/* Debug helper: log the value of every chip-identification macro for this
 * controller instance. */
void si_macro_check(agsaRoot_t *agRoot)
{
  SA_DBG1(("si_macro_check:smIS_SPC      %d\n",smIS_SPC(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_HIL      %d\n",smIS_HIL(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SFC      %d\n",smIS_SFC(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_spc8001  %d\n",smIS_spc8001(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_spc8081  %d\n",smIS_spc8081(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8008 %d\n",smIS_SPCV8008(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8009 %d\n",smIS_SPCV8009(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8018 %d\n",smIS_SPCV8018(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8019 %d\n",smIS_SPCV8019(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_ADAP8088 %d\n",smIS_ADAP8088(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_ADAP8089 %d\n",smIS_ADAP8089(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8070 %d\n",smIS_SPCV8070(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8071 %d\n",smIS_SPCV8071(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8072 %d\n",smIS_SPCV8072(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8073 %d\n",smIS_SPCV8073(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8074 %d\n",smIS_SPCV8074(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8075 %d\n",smIS_SPCV8075(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8076 %d\n",smIS_SPCV8076(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV8077 %d\n",smIS_SPCV8077(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV9015 %d\n",smIS_SPCV9015(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV9060 %d\n",smIS_SPCV9060(agRoot) ));
  SA_DBG1(("si_macro_check:smIS_SPCV     %d\n",smIS_SPCV(agRoot) ));
  SA_DBG1(("si_macro_check:smIS64bInt    %d\n", smIS64bInt(agRoot) ));
}
Index: head/sys/dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c
===================================================================
--- head/sys/dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c	(revision 359440)
+++ head/sys/dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c	(revision 359441)
@@ -1,9093 +1,9093 @@
/*******************************************************************************
*Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved.
* *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED *WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ********************************************************************************/ /*******************************************************************************/ /** \file * * * * This file contains CB functions used by lower layer in SAS/SATA TD layer * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef FDS_SM #include #include #include #endif #ifdef FDS_DM #include #include #include #endif #include #include #include #ifdef INITIATOR_DRIVER #include #include #include #endif #ifdef TARGET_DRIVER #include #include #include #endif #include #include #ifdef ECHO_TESTING /* temporary to test saEchoCommand() */ extern bit8 gEcho; #endif #if defined(SALLSDK_DEBUG) extern 
bit32 gLLDebugLevel;
#endif

#include

#ifdef SA_ENABLE_TRACE_FUNCTIONS
#ifdef siTraceFileID
#undef siTraceFileID
#endif
/* per-file tag used by the smTrace* macros to identify this translation unit */
#define siTraceFileID 'R'
#endif

/* functions that are common to SAS and SATA */

/*
 * ossaCacheInvalidate - cache-coherency callback from the LL (SA) layer.
 * Recovers the TD/OS context (tiRoot) stashed in agRoot->osData and
 * forwards the request to the OS-specific ostiCacheInvalidate().
 */
FORCEINLINE void
ossaCacheInvalidate(
                    agsaRoot_t  *agRoot,
                    void        *osMemHandle,
                    void        *virtPtr,
                    bit32       length
                    )
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  TI_DBG6(("ossaCacheInvalidate: start\n"));
  ostiCacheInvalidate(tiRoot, osMemHandle, virtPtr, length);
  return;
}

/*
 * ossaCacheFlush - cache-coherency callback from the LL (SA) layer;
 * forwards to the OS-specific ostiCacheFlush() via agRoot->osData.
 */
FORCEINLINE void
ossaCacheFlush(
               agsaRoot_t  *agRoot,
               void        *osMemHandle,
               void        *virtPtr,
               bit32       length
               )
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  TI_DBG6(("ossaCacheFlush: start\n"));
  ostiCacheFlush(tiRoot, osMemHandle, virtPtr, length);
  return;
}

/*
 * ossaCachePreFlush - cache-coherency callback from the LL (SA) layer;
 * forwards to the OS-specific ostiCachePreFlush() (presumably issued
 * ahead of a DMA transfer -- confirm against the OS-layer implementation).
 */
FORCEINLINE void
ossaCachePreFlush(
                  agsaRoot_t  *agRoot,
                  void        *osMemHandle,
                  void        *virtPtr,
                  bit32       length
                  )
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  TI_DBG6(("ossaCachePreFlush: start\n"));
  ostiCachePreFlush(tiRoot, osMemHandle, virtPtr, length);
  return;
}

/*****************************************************************************
*! \brief ossaDeviceHandleAccept
*
*  Purpose: This function is called by lower layer to inform TD layer of
*           a new SAS device arrival. Used only at the target
*
*
*  \param   agRoot         Pointer to chip/driver Instance.
*  \param   agDevHandle           Pointer to the device handle of the device
*  \param   agDevInfo             Pointer to the device info structure
*  \param   agPortContext         Pointer to a port context
*
*  \return:
*           OSSA_RC_ACCEPT        The device is accepted
*           OSSA_RC_REJECT        The device is rejected
*
*  \note - For details, refer to SAS/SATA Low-Level API Specification
*
*****************************************************************************/
osGLOBAL bit32
ossaDeviceHandleAccept(
                       agsaRoot_t          *agRoot,
                       agsaDevHandle_t     *agDevHandle,
                       agsaSASDeviceInfo_t *agDevInfo,
                       agsaPortContext_t   *agPortContext,
                       bit32               *hostAssignedDeviceId
                       )
{
#ifdef TARGET_DRIVER
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdsaPortContext_t *onePortContext = agNULL;
  tiPortalContext_t *tiPortalContext = agNULL;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tiDeviceHandle_t  *tiDeviceHandle = agNULL;
  tdsaSASSubID_t    agSASSubID;
  bit32             option;
  bit32             param;

  /* at target only; by default the TD layer accepts all devices */
  /* at this point, by LINK_UP event tdsaPortContext should have been created */
  smTraceFuncEnter(hpDBG_VERY_LOUD, "Y0");
  TI_DBG1(("ossaDeviceHandleAccept: start hostAssignedDeviceId 0x%X\n",*hostAssignedDeviceId));

  /* Validate the chain of contexts handed to us by the LL layer; any
     missing link means we cannot register the device -> reject it. */
  if (agPortContext == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleAccept: NULL agsaPortContext; wrong\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y0");
    return OSSA_RC_REJECT;
  }
  onePortContext = (tdsaPortContext_t *)agPortContext->osData;
  if (onePortContext == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleAccept: NULL oneportcontext; wrong\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "Y0");
    return OSSA_RC_REJECT;
  }
  tiPortalContext = (tiPortalContext_t *)onePortContext->tiPortalContext;
  if (tiPortalContext == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleAccept: NULL tiPortalContext; wrong\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "Y0");
    return OSSA_RC_REJECT;
  }

  /* add the device to device list cf) OSSA_DISCOVER_FOUND_DEVICE */
  TI_DBG4(("ossaDeviceHandleAccept: sasAddressHi 0x%08x\n",
    SA_DEVINFO_GET_SAS_ADDRESSHI(&agDevInfo->commonDevInfo)));
  TI_DBG4(("ossaDeviceHandleAccept: sasAddressLo 0x%08x\n",
    SA_DEVINFO_GET_SAS_ADDRESSLO(&agDevInfo->commonDevInfo)));
  TI_DBG4(("ossaDeviceHandleAccept: device type 0x%x\n",
    DEVINFO_GET_DEVICETTYPE(&agDevInfo->commonDevInfo)));
  TI_DBG4(("ossaDeviceHandleAccept: phys %d\n", agDevInfo->numOfPhys));
  TI_DBG4(("ossaDeviceHandleAccept: pid %d\n", onePortContext->id));

  if (DEVINFO_GET_DEVICETTYPE(&agDevInfo->commonDevInfo) == SAS_END_DEVICE)
  {
    TI_DBG4(("ossaDeviceHandleAccept: SAS_END_DEVICE\n"));
  }
  else if (DEVINFO_GET_DEVICETTYPE(&agDevInfo->commonDevInfo) == SAS_EDGE_EXPANDER_DEVICE)
  {
    TI_DBG4(("ossaDeviceHandleAccept: SAS_EDGE_EXPANDER_DEVICE\n"));
  }
  else /* SAS_FANOUT_EXPANDER_DEVICE */
  {
    TI_DBG4(("ossaDeviceHandleAccept: SAS_FANOUT_EXPANDER_DEVICE\n"));
  }

  agSASSubID.sasAddressHi = SA_DEVINFO_GET_SAS_ADDRESSHI(&agDevInfo->commonDevInfo);
  agSASSubID.sasAddressLo = SA_DEVINFO_GET_SAS_ADDRESSLO(&agDevInfo->commonDevInfo);
  agSASSubID.initiator_ssp_stp_smp = agDevInfo->initiator_ssp_stp_smp;
  agSASSubID.target_ssp_stp_smp = agDevInfo->target_ssp_stp_smp;

  /* Register the device in the shared TD context (target role, 0xFF = phy
     identifier placeholder -- see tdssAddSASToSharedcontext). */
  tdssAddSASToSharedcontext(
                            onePortContext,
                            agRoot,
                            agDevHandle,
                            &agSASSubID,
                            agTRUE,
                            0xFF,
                            TD_OPERATION_TARGET
                            );
  /* at this point devicedata for new device exists */
  oneDeviceData = (tdsaDeviceData_t *)agDevHandle->osData;
  if (oneDeviceData == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleAccept: NULL oneDeviceData; wrong\n"));
    return OSSA_RC_REJECT;
  }
  oneDeviceData->registered = agTRUE;
  tiDeviceHandle = &(oneDeviceData->tiDeviceHandle);
  if (tiDeviceHandle == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleAccept: NULL tiDeviceHandle; wrong\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "Y0");
    return OSSA_RC_REJECT;
  }
  /* setting MCN in agsaDeviceInfo_t */
  agDevInfo->commonDevInfo.flag = agDevInfo->commonDevInfo.flag | (tdsaAllShared->MCN << 16);
  /* increment RegisteredDevNums */
  onePortContext->RegisteredDevNums++;
  /* tag the id so the host-assigned value is recognizable in traces */
  *hostAssignedDeviceId |= 0xBEEF0000;
  TI_DBG1(("ossaDeviceHandleAccept: Now hostAssignedDeviceId 0x%X\n", *hostAssignedDeviceId));
  /* no login in SAS */
  /*
    osGLOBAL bit32 ostiTargetEvent (
                        tiRoot_t          *tiRoot,
                        tiPortalContext_t *portalContext,
                        tiDeviceHandle_t  *tiDeviceHandle,
                        tiTgtEventType_t  eventType,
                        bit32             eventStatus,
                        void              *parm
                        );
  */
  /* Notify the OS layer of the target-side device arrival. */
  ostiTargetEvent(
                  tiRoot,
                  tiPortalContext,
                  tiDeviceHandle,
                  tiTgtEventTypeDeviceChange,
                  tiDeviceArrival,
                  agNULL
                  );
  /* set MCN and initiator role bit using saSetDeviceInfo */
  option = 24; /* setting MCN and initiator role 1 1000b */
  param = (1 << 18) | (tdsaAllShared->MCN << 24);
  TI_DBG1(("ossaDeviceHandleAccept: option 0x%x param 0x%x MCN 0x%x\n", option, param, tdsaAllShared->MCN));
  saSetDeviceInfo(agRoot, agNULL, 0, agDevHandle, option, param, ossaSetDeviceInfoCB);
  smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "Y0");
  return OSSA_RC_ACCEPT;
#endif

#ifdef INITIATOR_DRIVER
  /* this function is not used in case of Initiator */
  return OSSA_RC_ACCEPT;
#endif
}

#ifdef INITIATOR_DRIVER
/*****************************************************************************
*! \brief ossaDiscoverSasCB
*
*  Purpose: This function is called by lower layer to inform TD layer of
*           SAS discovery results
*
*
*  \param   agRoot         Pointer to chip/driver Instance.
*  \param   agPortContext  Pointer to the port context of TD and Lower layer
*  \param   event          event type
*  \param   pParm1         Pointer to data associated with event
*  \param   pParm2         Pointer to data associated with event
*
*  \return: none
*
*  \note - For details, refer to SAS/SATA Low-Level API Specification
*
*****************************************************************************/
osGLOBAL void
ossaDiscoverSasCB(agsaRoot_t        *agRoot,
                  agsaPortContext_t *agPortContext,
                  bit32             event,
                  void              *pParm1,
                  void              *pParm2
                  )
{
  tdsaRootOsData_t    *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t            *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaContext_t       *tdsaAllShared = (tdsaContext_t *)osData->tdsaAllShared;
  tdsaPortContext_t   *onePortContext = agNULL;
  tdsaDeviceData_t    *oneDeviceData = agNULL;
  agsaDevHandle_t     *agDevHandle = agNULL;
  agsaSASDeviceInfo_t *agDeviceInfo = agNULL;
  tiPortalContext_t   *tiPortalContext = agNULL;
  tdList_t            *DeviceListList;
  tdsaSASSubID_t      agSASSubID;

  smTraceFuncEnter(hpDBG_VERY_LOUD,"Y1");
  TI_DBG2(("ossaDiscoverSasCB: start\n"));
  if (agPortContext == agNULL)
  {
    TI_DBG1(("ossaDiscoverSasCB: NULL agsaPortContext; wrong\n"));
    return;
  }
  onePortContext = (tdsaPortContext_t *)agPortContext->osData;
  tiPortalContext = (tiPortalContext_t *)onePortContext->tiPortalContext;

  switch ( event )
  {
  case OSSA_DISCOVER_STARTED:
  {
    TI_DBG3(("ossaDiscoverSasCB: STARTED pid %d\n", onePortContext->id));
    /* invalidate all devices in current device list */
    DeviceListList = tdsaAllShared->MainDeviceList.flink;
    while (DeviceListList != &(tdsaAllShared->MainDeviceList))
    {
      oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
      TI_DBG3(("ossaDiscoverSasCB: loop did %d\n", oneDeviceData->id));
      TI_DBG3(("ossaDiscoverSasCB: loop sasAddressHi 0x%08x\n",
        oneDeviceData->SASAddressID.sasAddressHi));
      TI_DBG6(("ossaDiscoverSasCB: loop sasAddressLo 0x%08x\n",
        oneDeviceData->SASAddressID.sasAddressLo));
      if (oneDeviceData->tdPortContext == onePortContext)
      {
        /* NOTE(review): despite the trace text, this branch only logs --
           no invalidation is actually performed here. */
        TI_DBG3(("ossaDiscoverSasCB: did %d is invalidated \n", oneDeviceData->id));
        /* temporary solution: only for sata direct attached */
      }
      DeviceListList = DeviceListList->flink;
    }
    onePortContext->DiscoveryState = ITD_DSTATE_STARTED;
    break;
  }
  case OSSA_DISCOVER_FOUND_DEVICE:
  {
    /* pParm1 = device handle, pParm2 = SAS device info for this event */
    TI_DBG4(("ossaDiscoverSasCB: $$$$$ FOUND_DEVICE pid %d\n", onePortContext->id));
    agDevHandle = (agsaDevHandle_t *)pParm1;
    agDeviceInfo = (agsaSASDeviceInfo_t *)pParm2;
    TI_DBG5(("ossaDiscoverSasCB: sasAddressHi 0x%08x\n",
      SA_DEVINFO_GET_SAS_ADDRESSHI(&agDeviceInfo->commonDevInfo)));
    TI_DBG5(("ossaDiscoverSasCB: sasAddressLo 0x%08x\n",
      SA_DEVINFO_GET_SAS_ADDRESSLO(&agDeviceInfo->commonDevInfo)));
    TI_DBG5(("ossaDiscoverSasCB: device type 0x%x\n",
      DEVINFO_GET_DEVICETTYPE(&agDeviceInfo->commonDevInfo)));
    TI_DBG6(("ossaDiscoverSasCB: phys %d\n", agDeviceInfo->numOfPhys));
    TI_DBG4(("ossaDiscoverSasCB: pid %d\n", onePortContext->id));

    /* Add only target devices; do not add expander device */
    if (DEVINFO_GET_DEVICETTYPE(&agDeviceInfo->commonDevInfo) == SAS_END_DEVICE)
    {
      agSASSubID.sasAddressHi = SA_DEVINFO_GET_SAS_ADDRESSHI(&agDeviceInfo->commonDevInfo);
      agSASSubID.sasAddressLo = SA_DEVINFO_GET_SAS_ADDRESSLO(&agDeviceInfo->commonDevInfo);
      agSASSubID.initiator_ssp_stp_smp = agDeviceInfo->initiator_ssp_stp_smp;
      agSASSubID.target_ssp_stp_smp = agDeviceInfo->target_ssp_stp_smp;
      TI_DBG2(("ossaDiscoverSasCB: adding ....\n"));
      tdssAddSASToSharedcontext(
                                onePortContext,
                                agRoot,
                                agDevHandle,
                                &agSASSubID,
                                agTRUE,
                                agDeviceInfo->phyIdentifier,
                                TD_OPERATION_INITIATOR
                                );
      /* let the OS layer know a device arrived */
      ostiInitiatorEvent(
                         tiRoot,
                         tiPortalContext,
                         agNULL,
                         tiIntrEventTypeDeviceChange,
                         tiDeviceArrival,
                         agNULL
                         );
    }
    else
    {
      TI_DBG5(("ossaDiscoverSasCB: $$$$$ not end device. not adding....\n"));
    }
    break;
  }
  case OSSA_DISCOVER_REMOVED_DEVICE:
  {
    TI_DBG3(("ossaDiscoverSasCB: REMOVED_DEVICE\n"));
    agDevHandle = (agsaDevHandle_t *)pParm1;
    agDeviceInfo = (agsaSASDeviceInfo_t *)pParm2;
    oneDeviceData = (tdsaDeviceData_t *) agDevHandle->osData;
    TI_DBG6(("ossaDiscoverSasCB: sasAddressHi 0x%08x\n",
      SA_DEVINFO_GET_SAS_ADDRESSHI(&agDeviceInfo->commonDevInfo)));
    TI_DBG6(("ossaDiscoverSasCB: sasAddressLo 0x%08x\n",
      SA_DEVINFO_GET_SAS_ADDRESSLO(&agDeviceInfo->commonDevInfo)));
    TI_DBG6(("ossaDiscoverSasCB: phys %d\n", agDeviceInfo->numOfPhys));
    TI_DBG6(("ossaDiscoverSasCB: onePortContext->id %d\n", onePortContext->id));
    if (oneDeviceData == agNULL)
    {
      TI_DBG1(("ossaDiscoverSasCB: Wrong. DevHandle->osData is NULL but is being removed\n"));
    }
    else
    {
      /* unregister from the shared context, clear the back-pointer, and
         notify the OS layer of the removal */
      tdssRemoveSASFromSharedcontext(onePortContext, oneDeviceData, agRoot);
      agDevHandle->osData = agNULL;
      ostiInitiatorEvent(
                         tiRoot,
                         tiPortalContext,
                         agNULL,
                         tiIntrEventTypeDeviceChange,
                         tiDeviceRemoval,
                         agNULL
                         );
    }
    break;
  }
  case OSSA_DISCOVER_COMPLETE:
  {
    TI_DBG2(("ossaDiscoverSasCB: SAS COMPLETE pid %d\n", onePortContext->id));
    /* note: SAS discovery must be called before SATA discovery
       "onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED" is in
       ossaDiscoverSataCB not in ossaDiscoverSasCB when SATA_ENABLE */
#ifndef SATA_ENABLE
    onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED;
    TI_DBG6(("ossaDiscoverSasCB: COMPLETE pid %d\n", onePortContext->id));
#endif
#ifdef SATA_ENABLE
    TI_DBG2(("ossaDiscoverSasCB: calling SATA discovery\n"));
    /* Continue with SATA discovery */
    saDiscover(agRoot, agPortContext, AG_SA_DISCOVERY_TYPE_SATA, onePortContext->discoveryOptions);
#else /* SATA not enable */
#ifdef TD_INTERNAL_DEBUG /* for debugging */
    /* dump device list */
    DeviceListList = tdsaAllShared->MainPortContextList.flink;
    while (DeviceListList != &(tdsaAllShared->MainPortContextList))
    {
      oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
      TI_DBG2(("ossaDiscoverSasCB: did %d valid %d\n", oneDeviceData->id, oneDeviceData->valid));
      TI_DBG2(("ossaDiscoverSasCB: device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
      TI_DBG2(("ossaDiscoverSasCB: device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
      DeviceListList = DeviceListList->flink;
    }
#endif
    /* letting OS layer know discovery has been successfully complete */
    ostiInitiatorEvent(
                       tiRoot,
                       tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDiscovery,
                       tiDiscOK,
                       agNULL
                       );
#endif /* SATA_ENABLE */
    break;
  }
  /* All abort/error events below report discovery failure to the OS layer
     in exactly the same way; only the trace text differs. */
  case OSSA_DISCOVER_ABORT:
  {
    TI_DBG3(("ossaDiscoverSasCB: ABORT\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_1:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 1\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_2:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 2\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_3:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 3\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_4:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 4\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_5:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 5\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_6:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 6\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_7:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 7\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_8:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 8\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  case OSSA_DISCOVER_ABORT_ERROR_9:
  {
    TI_DBG3(("ossaDiscoverSasCB: ERROR 9\n"));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  }
  default:
    TI_DBG3(("ossaDiscoverSasCB: ERROR default event 0x%x\n", event));
    /* letting OS layer know discovery has not been successfully complete */
    ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL );
    break;
  } /* end of switch */
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y1");
  return;
}
#endif // #ifdef INITIATOR_DRIVER

/* ossaLogTrace0..4: LL-layer trace hooks; intentionally no-ops in this
   port (tracing, when enabled, goes through the smTrace* macros instead). */
osGLOBAL void ossaLogTrace0(
                            agsaRoot_t  *agRoot,
                            bit32       traceCode
                            )
{
  return;
}

osGLOBAL void ossaLogTrace1(
                            agsaRoot_t  *agRoot,
                            bit32       traceCode,
                            bit32       value1
                            )
{
  return;
}

osGLOBAL void ossaLogTrace2(
                            agsaRoot_t  *agRoot,
                            bit32       traceCode,
                            bit32       value1,
                            bit32       value2
                            )
{
  return;
}

osGLOBAL void ossaLogTrace3(
                            agsaRoot_t  *agRoot,
                            bit32       traceCode,
                            bit32       value1,
                            bit32       value2,
                            bit32       value3
                            )
{
  return;
}

osGLOBAL void ossaLogTrace4(
                            agsaRoot_t  *agRoot,
                            bit32       traceCode,
                            bit32       value1,
                            bit32       value2,
                            bit32
value3, bit32 value4 ) { return; } /***************************************************************************** *! \brief ossaHwCB * * Purpose: This function is called by lower layer to inform TD layer of * HW related results * * \param agRoot Pointer to chip/driver Instance. * \param agPortContext Pointer to the port context of TD and Lower layer * \param event event type * \param eventParm1 event-specific parameter * \param eventParm2 event-specific parameter * \param eventParm3 event-specific parameter of pointer type * * \return: none * * \note - For details, refer to SAS/SATA Low-Level API Specification * *****************************************************************************/ osGLOBAL void ossaHwCB( agsaRoot_t *agRoot, agsaPortContext_t *agPortContext, bit32 event, bit32 eventParm1, void *eventParm2, void *eventParm3 ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)osData->tdsaAllShared; tdList_t *PortContextList = agNULL; tdsaPortContext_t *onePortContext = agNULL; agsaDevHandle_t *agDevHandle = agNULL; agsaSASIdentify_t *IDframe = agNULL; int i = 0; #ifdef INITIATOR_DRIVER tdsaSASSubID_t agSASSubID; #endif bit32 PhyID; bit32 PhyStatus; bit32 LinkRate; bit32 PortState; bit32 HwAckSatus = AGSA_RC_SUCCESS; // #ifdef INITIATOR_DRIVER #ifdef INITIATOR_DRIVER agsaFisRegDeviceToHost_t *RegD2H = agNULL; tdsaDeviceData_t *oneDeviceData = agNULL; tdList_t *DeviceListList; #endif #ifdef REMOVED bit32 found = agFALSE; #endif agsaHWEventEncrypt_t *pEncryptCBData; agsaEncryptInfo_t *pEncryptInfo; agsaHWEventMode_t *pModeEvent; tiEncryptPort_t encryptEventData; tiEncryptInfo_t encryptInfo; bit32 *pModePage; bit32 securityMode; bit32 cipherMode; bit32 encryptStatus; bit32 securitySetModeStatus; bit32 securityModeStatus; // #endif /* INITIATOR_DRIVER */ agsaPhyErrCountersPage_t *agPhyErrCountersPage; agsaEventSource_t eventSource; #ifdef FDS_DM dmRoot_t 
*dmRoot = &(tdsaAllShared->dmRoot); dmPortContext_t *dmPortContext = agNULL; bit32 status = DM_RC_FAILURE; dmPortInfo_t dmPortInfo; // bit32 discStatus = dmDiscInProgress; #endif smTraceFuncEnter(hpDBG_VERY_LOUD,"Y2"); TI_DBG2(("ossaHwCB: agPortContext %p event 0x%x eventParm1 0x%x eventParm2 %p eventParm3 %p\n", agPortContext,event,eventParm1,eventParm2,eventParm3 )); switch ( event ) { case OSSA_HW_EVENT_SAS_PHY_UP: { PhyID = TD_GET_PHY_ID(eventParm1); LinkRate = TD_GET_LINK_RATE(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agDevHandle = agNULL; IDframe = (agsaSASIdentify_t *)eventParm3; TI_DBG2(("ossaHwCB: Phy%d SAS link Up\n", PhyID)); if (agPortContext == agNULL) { TI_DBG1(("ossaHwCB: agPortContext null, wrong\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y2"); return; } if (agDevHandle == agNULL) { TI_DBG3(("ossaHwCB: agDevHandle null by design change\n")); } if (IDframe == agNULL) { TI_DBG1(("ossaHwCB: IDframe null, wrong\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "Y2"); return; } /* debugging only */ if (LinkRate == 0x01) { TI_DBG1(("ossaHwCB: SAS Link Rate is 1.5 Gbps PhyID %d\n",PhyID)); } if (LinkRate == 0x02) { TI_DBG1(("ossaHwCB: SAS Link Rate is 3.0 Gbps PhyID %d\n",PhyID)); } if (LinkRate == 0x04) { TI_DBG1(("ossaHwCB: SAS Link Rate is 6.0 Gbps PhyID %d\n",PhyID)); } if (LinkRate == 0x08) { TI_DBG1(("ossaHwCB: SAS Link Rate is 12.0 Gbps PhyID %d\n",PhyID)); } if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with SAS link up\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "Y2"); return; } if ( agPortContext->osData == agNULL) {/* if */ TI_DBG6 (("ossaHwCB: PhyID %d tdsaAllShared %p\n", PhyID, tdsaAllShared)); if (tdsaAllShared->Ports[PhyID].tiPortalContext == agNULL) { TI_DBG6(("ossaHwCB: NULL portalcontext\n")); } else { TI_DBG6(("ossaHwCB: NOT NULL portalcontext\n")); } if (IDframe == agNULL) { TI_DBG1(("ossaHwCB: IDFrame is NULL; SATA !!!!\n")); } else { TI_DBG3(("ossaHwCB: IDframe->sasAddressHi 0x%08x \n", 
SA_IDFRM_GET_SAS_ADDRESSHI(IDframe))); TI_DBG3(("ossaHwCB: IDframe->sasAddressLo 0x%08x \n", SA_IDFRM_GET_SAS_ADDRESSLO(IDframe))); } /* setting tdsaPortContext fields take the head from the FreeLink of tdsaPortContext_t then modify it then put it in MainLink of tdsaPortContext_t */ tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); if (TDLIST_NOT_EMPTY(&(tdsaAllShared->FreePortContextList))) { TDLIST_DEQUEUE_FROM_HEAD(&PortContextList, &(tdsaAllShared->FreePortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, FreeLink, PortContextList); TI_DBG2(("ossaHwCB: pid %d\n", onePortContext->id)); TI_DBG6(("ossaHwCB: onePortContext %p\n", onePortContext)); if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: onePortContext is NULL in allocation, wrong!\n")); return; } /* sets fields of tdsaportcontext */ #ifdef INITIATOR_DRIVER onePortContext->DiscoveryState = ITD_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = AG_SA_DISCOVERY_OPTION_FULL_START; #endif onePortContext->PhyIDList[PhyID] = agTRUE; if (IDframe == agNULL) { onePortContext->sasRemoteAddressHi = 0xFFFFFFFF; onePortContext->sasRemoteAddressLo = 0xFFFFFFFF; onePortContext->directAttatchedSAS = agTRUE; } else { onePortContext->sasRemoteAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(IDframe); onePortContext->sasRemoteAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(IDframe); /* Create ID frame and storing ID frame */ osti_memcpy(&onePortContext->sasIDframe, IDframe, sizeof(agsaSASIdentify_t)); tdhexdump("ossaHWCB: sasIDframe", (bit8 *)(&onePortContext->sasIDframe), sizeof(agsaSASIdentify_t)); if (SA_IDFRM_GET_DEVICETTYPE(IDframe) == SAS_END_DEVICE) { onePortContext->directAttatchedSAS = agTRUE; } #ifdef FDS_DM if (SA_IDFRM_GET_DEVICETTYPE(IDframe) == SAS_EDGE_EXPANDER_DEVICE || SA_IDFRM_GET_DEVICETTYPE(IDframe) == SAS_FANOUT_EXPANDER_DEVICE ) { onePortContext->UseDM = agTRUE; } #endif } onePortContext->sasLocalAddressHi = 
SA_IDFRM_GET_SAS_ADDRESSHI(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->sasLocalAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->tiPortalContext = tdsaAllShared->Ports[PhyID].tiPortalContext; onePortContext->agRoot = agRoot; onePortContext->agPortContext = agPortContext; tdsaAllShared->Ports[PhyID].portContext = onePortContext; agPortContext->osData = onePortContext; onePortContext->valid = agTRUE; if (LinkRate == 0x01) { onePortContext->LinkRate = SAS_CONNECTION_RATE_1_5G; } else if (LinkRate == 0x02) { onePortContext->LinkRate = SAS_CONNECTION_RATE_3_0G; } else if (LinkRate == 0x04) { onePortContext->LinkRate = SAS_CONNECTION_RATE_6_0G; } else /* (LinkRate == 0x08) */ { onePortContext->LinkRate = SAS_CONNECTION_RATE_12_0G; } tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->MainLink), &(tdsaAllShared->MainPortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); #ifdef FDS_DM dmPortContext = &(onePortContext->dmPortContext); dmPortContext->tdData = onePortContext; /* set up dmPortInfo_t */ PORTINFO_PUT_SAS_REMOTE_ADDRESSLO(&dmPortInfo, onePortContext->sasRemoteAddressLo); PORTINFO_PUT_SAS_REMOTE_ADDRESSHI(&dmPortInfo, onePortContext->sasRemoteAddressHi); PORTINFO_PUT_SAS_LOCAL_ADDRESSLO(&dmPortInfo, onePortContext->sasLocalAddressLo); PORTINFO_PUT_SAS_LOCAL_ADDRESSHI(&dmPortInfo, onePortContext->sasLocalAddressHi); TI_DBG2(("ossaHwCB: phy %d hi 0x%x lo 0x%x\n", PhyID, SA_IDFRM_GET_SAS_ADDRESSHI(&(tdsaAllShared->Ports[PhyID].SASID)), SA_IDFRM_GET_SAS_ADDRESSLO(&(tdsaAllShared->Ports[PhyID].SASID)))); TI_DBG2(("ossaHwCB: LocalAddrHi 0x%08x LocaAddrLo 0x%08x\n", onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo)); dmPortInfo.flag = onePortContext->LinkRate; if (onePortContext->UseDM == agTRUE) { TI_DBG1(("ossaHwCB: calling dmCreatePort\n")); status = dmCreatePort(dmRoot, dmPortContext, &dmPortInfo); if (status != DM_RC_SUCCESS) { TI_DBG1(("ossaHwCB: 
dmCreatePort failed!!! 0x%x\n", status)); } } #endif } else { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); TI_DBG1(("\nossaHwCB: Attention!!! no more free PortContext.\n")); } #ifdef TD_INTERNAL_DEBUG /* for debugging only */ print_tdlist_flink(&(tdsaPortContext->FreeLink), 1, 1); print_tdlist_flink(&(tdsaPortContext->MainLink), 1, 2); print_tdlist_flink(&(tdsaDeviceData->FreeLink), 2, 1); print_tdlist_flink(&(tdsaDeviceData->MainLink), 2, 2); #endif #ifdef TD_INTERNAL_DEBUG /* for debugging */ PortContextList = tdsaPortContext->MainLink.flink; while (PortContextList != &(tdsaPortContext->MainLink)) { twoPortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, PortContextList); TI_DBG6(("ossaHwCB: in while portContext ID %d\n", twoPortContext->id)); TI_DBG6(("ossaHwCB: in while PortContext %p\n", twoPortContext)); PortContextList = PortContextList->flink; } #endif /* add agDevHandle */ if (SA_IDFRM_GET_DEVICETTYPE(IDframe) != SAS_NO_DEVICE) { #ifdef INITIATOR_DRIVER agSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(IDframe); agSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(IDframe); agSASSubID.initiator_ssp_stp_smp = IDframe->initiator_ssp_stp_smp; agSASSubID.target_ssp_stp_smp = IDframe->target_ssp_stp_smp; #endif TI_DBG2(("ossaHwCB: adding ....\n")); /* uses only SASIDframe not agsaSASDeviceInfo_t */ #ifdef INITIATOR_DRIVER tdssAddSASToSharedcontext( onePortContext, agRoot, agDevHandle, /* agNULL */ &agSASSubID, agTRUE, (bit8)PhyID, TD_OPERATION_INITIATOR ); #endif #ifdef FDS_DM if (SA_IDFRM_GET_DEVICETTYPE(IDframe) == SAS_END_DEVICE && SA_IDFRM_IS_SSP_TARGET(IDframe) ) { TI_DBG2(("ossaHwCB: NOTIFY_ENABLE_SPINUP PhyID %d \n", PhyID)); for (i=0;iPorts[PhyID].tiPortalContext ); #endif } else { TI_DBG5(("ossaHwCB: $$$$$ not end device. 
not adding....\n")); } saPortControl(agRoot, /* AGSA_PORT_SET_PORT_RECOVERY_TIME */ agNULL, 0, agPortContext, AGSA_PORT_SET_PORT_RECOVERY_TIME, tdsaAllShared->portTMO, //PORT_RECOVERY_TIMEOUT 0 ); /* setting SAS PORT RESET TMO and SATA PORT RESET TMO*/ if (tIsSPCV12G(agRoot)) { saPortControl(agRoot, /* AGSA_PORT_SET_PORT_RESET_TIME */ agNULL, 0, agPortContext, AGSA_PORT_SET_PORT_RESET_TIME, SAS_12G_PORT_RESET_TMO, // 800 ms 0 ); } else { saPortControl(agRoot, /* AGSA_PORT_SET_PORT_RESET_TIME */ agNULL, 0, agPortContext, AGSA_PORT_SET_PORT_RESET_TIME, SAS_PORT_RESET_TMO, // 300 ms 0 ); } } else { /* an existing portcontext to be tested */ TI_DBG2(("ossaHwCB: SAS existing portcontext returned\n")); onePortContext = (tdsaPortContext_t *)agPortContext->osData; if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: onePortContext is NULL, wrong!\n")); return; } if (onePortContext->valid == agFALSE) { /* port has been invalidated; needs to be allocated */ TI_DBG2(("ossaHwCB: SAS allocating port context\n")); tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); if (TDLIST_NOT_EMPTY(&(tdsaAllShared->FreePortContextList))) { TDLIST_DEQUEUE_FROM_HEAD(&PortContextList, &(tdsaAllShared->FreePortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, FreeLink, PortContextList); TI_DBG2(("ossaHwCB: allocating pid %d\n", onePortContext->id)); TI_DBG6(("ossaHwCB: allocating onePortContext %p\n", onePortContext)); if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: onePortContext is NULL in allocation, wrong!\n")); return; } /* sets fields of tdsaportcontext */ #ifdef INITIATOR_DRIVER onePortContext->DiscoveryState = ITD_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = AG_SA_DISCOVERY_OPTION_FULL_START; #endif onePortContext->PhyIDList[PhyID] = agTRUE; if (IDframe == agNULL) { onePortContext->sasRemoteAddressHi = 0xFFFFFFFF; onePortContext->sasRemoteAddressLo = 0xFFFFFFFF; onePortContext->directAttatchedSAS = agTRUE; } else 
{ onePortContext->sasRemoteAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(IDframe); onePortContext->sasRemoteAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(IDframe); /* Create ID frame and storing ID frame */ osti_memcpy(&onePortContext->sasIDframe, IDframe, sizeof(agsaSASIdentify_t)); tdhexdump("ossaHWCB: sasIDframe", (bit8 *)(&onePortContext->sasIDframe), sizeof(agsaSASIdentify_t)); if (SA_IDFRM_GET_DEVICETTYPE(IDframe) == SAS_END_DEVICE) { onePortContext->directAttatchedSAS = agTRUE; } } onePortContext->sasLocalAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->sasLocalAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->tiPortalContext = tdsaAllShared->Ports[PhyID].tiPortalContext; onePortContext->agRoot = agRoot; onePortContext->agPortContext = agPortContext; tdsaAllShared->Ports[PhyID].portContext = onePortContext; agPortContext->osData = onePortContext; onePortContext->valid = agTRUE; if (LinkRate == 0x01) { onePortContext->LinkRate = SAS_CONNECTION_RATE_1_5G; } else if (LinkRate == 0x02) { onePortContext->LinkRate = SAS_CONNECTION_RATE_3_0G; } else if (LinkRate == 0x04) { onePortContext->LinkRate = SAS_CONNECTION_RATE_6_0G; } else /* (LinkRate == 0x08) */ { onePortContext->LinkRate = SAS_CONNECTION_RATE_12_0G; } tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->MainLink), &(tdsaAllShared->MainPortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } else { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); TI_DBG1(("\nossaHwCB: Attention!!! no more free PortContext.\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "Y2"); return; } } /* invalidated port */ else { /* already alloacated */ TI_DBG2(("ossaHwCB: SAS already allocated port context\n")); if (TDLIST_EMPTY(&(tdsaAllShared->MainPortContextList))) { TI_DBG1(("ossaHwCB: wrong!!! 
null tdsaPortContext list\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "Y2"); return; } if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: wrong !!! No corressponding tdsaPortContext\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'f', "Y2"); return; } TI_DBG2(("ossaHwCB: existing pid %d\n", onePortContext->id)); if (tdsaAllShared->Ports[PhyID].portContext == agNULL) { TI_DBG1(("ossaHwCB: existing allshared pid is NULL\n")); } else { TI_DBG2(("ossaHwCB: existing allshared pid %d\n", tdsaAllShared->Ports[PhyID].portContext->id)); } /* updates PhyID belong to a port */ onePortContext->PhyIDList[PhyID] = agTRUE; #ifdef FDS_DM if (SA_IDFRM_GET_DEVICETTYPE(IDframe) == SAS_END_DEVICE && SA_IDFRM_IS_SSP_TARGET(IDframe) ) { TI_DBG2(("ossaHwCB: NOTIFY_ENABLE_SPINUP PhyID %d \n", PhyID)); for (i=0;iSeenLinkUp = agTRUE; } /* else, old portcontext */ break; } #ifdef INITIATOR_DRIVER case OSSA_HW_EVENT_SATA_PHY_UP: { PhyID = TD_GET_PHY_ID(eventParm1); LinkRate = TD_GET_LINK_RATE(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agDevHandle = agNULL; RegD2H = ( agsaFisRegDeviceToHost_t *)eventParm3; TI_DBG2(("ossaHwCB: Phy%d SATA link Up\n", PhyID)); if (agDevHandle == agNULL) { TI_DBG3(("ossaHwCB: agDevHandle null by design change\n")); } if (RegD2H == agNULL) { TI_DBG1(("ossaHwCB: RegD2H null, wrong\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'g', "Y2"); return; } TI_DBG2(("ossaHwCB: agDevHandle %p\n", agDevHandle)); tdhexdump("ossaHWCB RegD2H", (bit8 *)RegD2H, sizeof(agsaFisRegDeviceToHost_t)); TI_DBG2(("ossaHwCB: Sector Count %d\n", RegD2H->d.sectorCount)); TI_DBG2(("ossaHwCB: LBA LOW %d\n", RegD2H->d.lbaLow)); TI_DBG2(("ossaHwCB: LBA MID %d\n", RegD2H->d.lbaMid)); TI_DBG2(("ossaHwCB: LBA HIGH %d\n", RegD2H->d.lbaHigh)); TI_DBG2(("ossaHwCB: DEVICE %d\n", RegD2H->d.device)); /* debugging only */ if (LinkRate == 0x01) { TI_DBG1(("ossaHwCB: SATA Link Rate is 1.5 Gbps PhyID %d\n",PhyID)); } if (LinkRate == 0x02) { TI_DBG1(("ossaHwCB: SATA Link Rate is 3.0 Gbps PhyID %d\n",PhyID)); } if 
(LinkRate == 0x04) { TI_DBG1(("ossaHwCB: SATA Link Rate is 6.0 Gbps PhyID %d\n",PhyID)); } if (LinkRate == 0x08) { TI_DBG1(("ossaHwCB: SATA Link Rate is 12.0 Gbps PhyID %d\n",PhyID)); } if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with SATA link up\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'h', "Y2"); return; } if ( agPortContext->osData == agNULL) {/* if */ TI_DBG6 (("ossaHwCB: PhyID %d tdsaAllShared %p\n", PhyID, tdsaAllShared)); tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); if (TDLIST_NOT_EMPTY(&(tdsaAllShared->FreePortContextList))) { TDLIST_DEQUEUE_FROM_HEAD(&PortContextList, &(tdsaAllShared->FreePortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, FreeLink, PortContextList); TI_DBG2(("ossaHwCB: pid %d\n", onePortContext->id)); TI_DBG6(("ossaHwCB: onePortContext %p\n", onePortContext)); if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: onePortContext is NULL in allocation, wrong!\n")); return; } /* sets fields of tdsaportcontext */ onePortContext->DiscoveryState = ITD_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = AG_SA_DISCOVERY_OPTION_FULL_START; onePortContext->PhyIDList[PhyID] = agTRUE; /* NO sas address for SATA */ onePortContext->sasRemoteAddressHi = 0xFFFFFFFF; onePortContext->sasRemoteAddressLo = 0xFFFFFFFF; /* copying the signature */ onePortContext->remoteSignature[0] = RegD2H->d.sectorCount; onePortContext->remoteSignature[1] = RegD2H->d.lbaLow; onePortContext->remoteSignature[2] = RegD2H->d.lbaMid; onePortContext->remoteSignature[3] = RegD2H->d.lbaHigh; onePortContext->remoteSignature[4] = RegD2H->d.device; onePortContext->sasLocalAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->sasLocalAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->tiPortalContext = tdsaAllShared->Ports[PhyID].tiPortalContext; onePortContext->agRoot = agRoot; onePortContext->agPortContext = 
agPortContext; tdsaAllShared->Ports[PhyID].portContext = onePortContext; agPortContext->osData = onePortContext; onePortContext->nativeSATAMode = agTRUE; onePortContext->valid = agTRUE; if (LinkRate == 0x01) { onePortContext->LinkRate = SAS_CONNECTION_RATE_1_5G; } else if (LinkRate == 0x02) { onePortContext->LinkRate = SAS_CONNECTION_RATE_3_0G; } else if (LinkRate == 0x04) { onePortContext->LinkRate = SAS_CONNECTION_RATE_6_0G; } else /* (LinkRate == 0x08) */ { onePortContext->LinkRate = SAS_CONNECTION_RATE_12_0G; } tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->MainLink), &(tdsaAllShared->MainPortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } else { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); TI_DBG1(("\nossaHwCB: Attention!!! no more free PortContext.\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'i', "Y2"); return; } #ifdef SATA_ENABLE /* tdssAddSATAToSharedcontext() sends identify device data to find out the uniqueness of target. In identify device data CB fn (satAddSATAIDDevCB()), tiPortLinkUp and tiPortDiscoveryReady happen */ tdssAddSATAToSharedcontext( onePortContext, agRoot, agDevHandle, /* agNULL */ agNULL, agTRUE, (bit8)PhyID ); #endif /* setting SAS PORT RESET TMO and SATA PORT RESET TMO*/ saPortControl(agRoot, /* AGSA_PORT_SET_PORT_RESET_TIME */ agNULL, 0, agPortContext, AGSA_PORT_SET_PORT_RESET_TIME, 0, SATA_PORT_RESET_TMO // 8000 ms ); } else { /* an existing portcontext to be tested */ TI_DBG1(("ossaHwCB: SATA existing portcontext returned. need testing\n")); onePortContext = (tdsaPortContext_t *)agPortContext->osData; /* for debugging only */ if (onePortContext->valid == agFALSE) { /* port has been invalidated; needs to be allocated */ TI_DBG2(("ossaHwCB: SATA allocating port context\n")); } else { /* already alloacated */ TI_DBG1(("ossaHwCB: Wrong!!! 
SATA already allocated port context\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'j', "Y2"); return; } tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); if (TDLIST_NOT_EMPTY(&(tdsaAllShared->FreePortContextList))) { TDLIST_DEQUEUE_FROM_HEAD(&PortContextList, &(tdsaAllShared->FreePortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, FreeLink, PortContextList); TI_DBG2(("ossaHwCB: pid %d\n", onePortContext->id)); TI_DBG6(("ossaHwCB: onePortContext %p\n", onePortContext)); if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: onePortContext is NULL in allocation, wrong!\n")); return; } /* sets fields of tdsaportcontext */ onePortContext->DiscoveryState = ITD_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = AG_SA_DISCOVERY_OPTION_FULL_START; onePortContext->PhyIDList[PhyID] = agTRUE; /* NO sas address for SATA */ onePortContext->sasRemoteAddressHi = 0xFFFFFFFF; onePortContext->sasRemoteAddressLo = 0xFFFFFFFF; /* copying the signature */ onePortContext->remoteSignature[0] = RegD2H->d.sectorCount; onePortContext->remoteSignature[1] = RegD2H->d.lbaLow; onePortContext->remoteSignature[2] = RegD2H->d.lbaMid; onePortContext->remoteSignature[3] = RegD2H->d.lbaHigh; onePortContext->remoteSignature[4] = RegD2H->d.device; onePortContext->sasLocalAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->sasLocalAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&tdsaAllShared->Ports[PhyID].SASID); onePortContext->tiPortalContext = tdsaAllShared->Ports[PhyID].tiPortalContext; onePortContext->agRoot = agRoot; onePortContext->agPortContext = agPortContext; tdsaAllShared->Ports[PhyID].portContext = onePortContext; agPortContext->osData = onePortContext; onePortContext->nativeSATAMode = agTRUE; onePortContext->valid = agTRUE; if (LinkRate == 0x01) { onePortContext->LinkRate = SAS_CONNECTION_RATE_1_5G; } else if (LinkRate == 0x02) { onePortContext->LinkRate = SAS_CONNECTION_RATE_3_0G; } else if 
(LinkRate == 0x04) { onePortContext->LinkRate = SAS_CONNECTION_RATE_6_0G; } else /* (LinkRate == 0x08) */ { onePortContext->LinkRate = SAS_CONNECTION_RATE_12_0G; } tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->MainLink), &(tdsaAllShared->MainPortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } else { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); TI_DBG1(("\nossaHwCB: Attention!!! no more free PortContext.\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'k', "Y2"); return; } /*hotplug */ #ifdef SATA_ENABLE tdssAddSATAToSharedcontext( onePortContext, agRoot, agDevHandle, /* agNULL */ agNULL, agTRUE, (bit8)PhyID ); #endif /* end hotplug */ } break; } #endif case OSSA_HW_EVENT_SATA_SPINUP_HOLD: { PhyID = TD_GET_PHY_ID(eventParm1); TI_DBG2(("ossaHwCB: spinup hold PhyID %d\n", PhyID)); break; } case OSSA_HW_EVENT_PHY_DOWN: { bit32 AllPhyDown = agTRUE; /* 4/15/08 spec */ PhyID = TD_GET_PHY_ID(eventParm1); LinkRate = TD_GET_LINK_RATE(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: Phy%d link Down\n", PhyID)); if (agPortContext == agNULL) { TI_DBG1(("ossaHwCB: agPortContext null, wrong\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'l', "Y2"); return; } if ( agPortContext->osData == agNULL) { /* if */ /* PortContext must exit at this point */ TI_DBG1(("ossaHwCB: NULL portalcontext. Error. Can't be NULL\n")); } else { TI_DBG3(("ossaHwCB: NOT NULL portalcontext\n")); onePortContext = (tdsaPortContext_t *)agPortContext->osData; if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: wrong !!! 
No corressponding tdsaPortContext\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'm', "Y2"); return; } onePortContext->PhyIDList[PhyID] = agFALSE; for(i=0;iPhyIDList[i] == agTRUE) { TI_DBG3(("ossaHwCB: Phy %d is still up\n", i)); AllPhyDown = agFALSE; break; } } /* last phy belong to the portcontext */ if (AllPhyDown == agTRUE) { #ifdef NOT_YET TI_DBG1(("ossaHwCB: calling tiPortLinkDown\n")); ostiPortEvent ( tiRoot, tiPortLinkDown, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif } if (PortState == OSSA_PORT_VALID) { /* do nothing */ /* no ack for every phy down */ #ifdef FDS_DM /* update MCN for all devices belong to this port */ tdsaUpdateMCN(dmRoot, onePortContext); #endif } else if (PortState == OSSA_PORT_LOSTCOMM) { /* 1. Mark the port as invalid and stop the io for that port and its device No ack here. Otherwise, port will be released by FW. */ TI_DBG2(("ossaHwCB: phy Down and OSSA_PORT_LOSTCOMM\n")); /* save eventSource related information in tdsaAllShared */ tdsaAllShared->eventSource[PhyID].EventValid = agTRUE; tdsaAllShared->eventSource[PhyID].Source.agPortContext = agPortContext; tdsaAllShared->eventSource[PhyID].Source.event = OSSA_HW_EVENT_PHY_DOWN; /* phy ID */ tdsaAllShared->eventSource[PhyID].Source.param = PhyID; /* phy ID */ onePortContext->eventPhyID = PhyID; /* to stop IO's */ onePortContext->valid = agFALSE; break; } else if (PortState == OSSA_PORT_IN_RESET) { TI_DBG2(("ossaHwCB: phy Down and OSSA_PORT_IN_RESET\n")); /* save eventSource related information in tdsaAllShared */ tdsaAllShared->eventSource[PhyID].EventValid = agTRUE; tdsaAllShared->eventSource[PhyID].Source.agPortContext = agPortContext; tdsaAllShared->eventSource[PhyID].Source.event = OSSA_HW_EVENT_PHY_DOWN; /* phy ID */ tdsaAllShared->eventSource[PhyID].Source.param = PhyID; /* phy ID */ onePortContext->eventPhyID = PhyID; /* to stop IO's */ onePortContext->valid = agFALSE; break; } else if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Last phy Down and port invalid 
OSSA_PORT_INVALID\n")); /* invalidate port then, saHwEventAck() in ossaDeregisterDeviceHandleCB() */ /* save eventSource related information in tdsaAllShared */ tdsaAllShared->eventSource[PhyID].EventValid = agTRUE; tdsaAllShared->eventSource[PhyID].Source.agPortContext = agPortContext; tdsaAllShared->eventSource[PhyID].Source.event = OSSA_HW_EVENT_PHY_DOWN; /* phy ID */ tdsaAllShared->eventSource[PhyID].Source.param = PhyID; /* phy ID */ onePortContext->eventPhyID = PhyID; onePortContext->valid = agFALSE; TI_DBG2(("ossaHwCB: pid %d\n", onePortContext->id)); #ifdef INITIATOR_DRIVER /* notifying link down (all links belonging to a port are down) */ ostiPortEvent( tiRoot, tiPortStopped, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef TARGET_DRIVER ostiPortEvent( tiRoot, tiPortLinkDown, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef INITIATOR_DRIVER tdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif #ifdef TARGET_DRIVER ttdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif /* find a PhyID and reset for portContext in tdssSASShared */ for(i=0;iPhyIDList[i] == agTRUE) { tdsaAllShared->Ports[i].portContext = agNULL; } } /* portcontext is removed from MainLink to FreeLink in tdssReportRemovals or ossaDeregisterDeviceHandleCB */ }/* OSSA_PORT_INVALID */ else { /* other newly defined port state */ /* do nothing */ TI_DBG2(("ossaHwCB: portstate 0x%x\n", PortState)); } } /* big else */ break; } case OSSA_HW_EVENT_PHY_START_STATUS: { PhyID = TD_GET_PHY_ID(eventParm1); PhyStatus = TD_GET_PHY_STATUS(eventParm1); TI_DBG6(("ossaHwCB: OSSA_HW_EVENT_PHY_START_STATUS\n")); if (PhyStatus == 0x00) { TI_DBG6(("ossaHwCB: OSSA_HW_EVENT_PHY_START_STATUS, SUCCESS\n")); } else if (PhyStatus == 0x01) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_START_STATUS, INVALID_PHY\n")); } else if (PhyStatus == 0x02) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_START_STATUS, PHY_NOT_DISABLED\n")); } else { TI_DBG1(("ossaHwCB: 
OSSA_HW_EVENT_PHY_START_STATUS, OTHER_FAILURE %d\n", PhyStatus)); } break; } case OSSA_HW_EVENT_PHY_STOP_STATUS: { agsaContext_t *agContext; PhyID = TD_GET_PHY_ID(eventParm1); PhyStatus = TD_GET_PHY_STATUS(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS\n")); if (PhyStatus == 0x00) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS, SUCCESS\n")); agContext = (agsaContext_t *)eventParm2; - onePortContext = (tdsaPortContext_t *)agContext->osData;; + onePortContext = (tdsaPortContext_t *)agContext->osData; if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: onePortContext is null, wrong!!!\n")); return; } onePortContext->PhyIDList[PhyID] = agFALSE; if (PortState == OSSA_PORT_INVALID) /* invalid port */ { TI_DBG1(("ossaHwCB: OSSA_PORT_INVALID\n")); tdsaAllShared->eventSource[PhyID].EventValid = NO_ACK; onePortContext->eventPhyID = PhyID; onePortContext->valid = agFALSE; TI_DBG2(("ossaHwCB: pid %d\n", onePortContext->id)); #ifdef INITIATOR_DRIVER /* notifying link down (all links belonging to a port are down) */ ostiPortEvent( tiRoot, tiPortStopped, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef TARGET_DRIVER ostiPortEvent( tiRoot, tiPortLinkDown, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef INITIATOR_DRIVER tdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif #ifdef TARGET_DRIVER ttdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif /* find a PhyID and reset for portContext in tdssSASShared */ for(i=0;iPhyIDList[i] == agTRUE) { tdsaAllShared->Ports[i].portContext = agNULL; } } /* portcontext is removed from MainLink to FreeLink in tdssReportRemovals or ossaDeregisterDeviceHandleCB */ } /* invalid port */ } else if (PhyStatus == 0x01) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS, INVALID_PHY\n")); } else if (PhyStatus == 0x02) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS, DEVICES_ATTACHED\n")); } else if (PhyStatus == 0x03) { 
TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS, OTHER_FAILURE\n")); } else if (PhyStatus == 0x04) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS, PHY_NOT_DISABLED\n")); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_STOP_STATUS, Unknown %d\n", PhyStatus)); } break; } case OSSA_HW_EVENT_RESET_START: { bit32 new_status = TD_GET_RESET_STATUS(eventParm1); TI_DBG2(("ossaHwCB: RESET_START, status %d\n", new_status)); if (new_status == OSSA_SUCCESS) { tdsaAllShared->flags.resetInProgress = agTRUE; TI_DBG2(("ossaHwCB: RESET_START, SUCCESS\n")); } else if (new_status == OSSA_FAILURE) { TI_DBG1(("ossaHwCB: RESET_START, FAILURE\n")); } else { TI_DBG1(("ossaHwCB: RESET_START, PENDING\n")); } break; } case OSSA_HW_EVENT_RESET_COMPLETE: { bit32 new_status = TD_GET_RESET_STATUS(eventParm1); #ifdef SOFT_RESET_TEST DbgPrint("Reset Complete\n"); #endif TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_RESET_COMPLETE, status %d\n", new_status)); if (new_status == OSSA_SUCCESS) { /* remove all portcontext and devices */ #ifdef INITIATOR_DRIVER tdssRemoveSASSATAFromSharedcontextByReset(agRoot); #endif tdsaAllShared->flags.resetInProgress = agFALSE; /* a callback notifying reset completion */ ostiPortEvent( tiRoot, tiPortResetComplete, tiSuccess, agNULL ); } else { /* a callback notifying reset completion */ tdsaAllShared->flags.resetInProgress = agFALSE; ostiPortEvent( tiRoot, tiPortResetComplete, tiError, agNULL ); } break; } case OSSA_HW_EVENT_PHY_ERR_INBOUND_CRC: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_INBOUND_CRC from PhyID %d; to be tested\n", PhyID)); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_PHY_ERR_INBOUND_CRC\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'n', "Y2"); return; } if (agPhyErrCountersPage != agNULL) { TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_INBOUND_CRC from PhyID %d\n", PhyID)); 
TI_DBG1(("ossaHwCB: iDw %d rDE %d cV %d lS %d rP %d iCRC %d\n", agPhyErrCountersPage->invalidDword, agPhyErrCountersPage->runningDisparityError, agPhyErrCountersPage->codeViolation, agPhyErrCountersPage->lossOfDwordSynch, agPhyErrCountersPage->phyResetProblem, agPhyErrCountersPage->inboundCRCError )); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_INBOUND_CRC: Error!!! eventParm2 is NULL\n")); } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_PHY_ERR_INBOUND_CRC; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'o', "Y2"); return; } break; } #ifdef REMOVED case OSSA_HW_EVENT_PORT_INVALID: { TI_DBG1(("ossaHwCB: PORT_INVALID\n")); if ( agPortContext == agNULL) { TI_DBG1(("ossaHwCB: agPortContext is NULL, wrong.\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'p', "Y2"); return; } if ( agPortContext->osData != agNULL) { TI_DBG1(("ossaHwCB: NOT NULL osDATA\n")); /* put the old portcontext back to free list */ onePortContext = (tdsaPortContext_t *)agPortContext->osData; TI_DBG1(("ossaHwCB: pid %d\n", onePortContext->id)); #ifdef INITIATOR_DRIVER /* notifying link down (all links belonging to a port are down) */ ostiPortEvent ( tiRoot, tiPortStopped, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif /* INITIATOR_DRIVER */ #ifdef TARGET_DRIVER ostiPortEvent( tiRoot, tiPortLinkDown, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif /*TARGET_DRIVER */ /* find the device belonging to the port and remove it from the device list */ //tdssRemoveSASSATAFromSharedcontext(agRoot, tdsaDeviceData, onePortContext); #ifdef INITIATOR_DRIVER /* reset the fields of portcontext */ onePortContext->DiscoveryState = ITD_DSTATE_NOT_STARTED; tdssReportRemovals(agRoot, onePortContext, 
agFALSE ); onePortContext->discoveryOptions = AG_SA_DISCOVERY_OPTION_FULL_START; onePortContext->DiscoveryRdyGiven = agFALSE; onePortContext->SeenLinkUp = agFALSE; #endif /* INITIATOR_DRIVER */ /* for hotplug */ /* find a PhyID and reset for portContext in tdssSASShared */ for(i=0;iPhyIDList[i] == agTRUE) { tdsaAllShared->Ports[i].portContext = agNULL; } } /* reset PhyIDList in portcontext */ for(i=0;iPhyIDList[i] = agFALSE; } // onePortContext->tiPortalContext = agNULL; // onePortContext->agRoot = agNULL; onePortContext->agPortContext = agNULL; onePortContext->valid = agFALSE; TI_DBG4(("ossaHwCB: pid %d count %d\n", onePortContext->id, onePortContext->Count)); /* resets the number of devices in onePortContext */ onePortContext->Count = 0; onePortContext->discovery.pendingSMP = 0; onePortContext->discovery.SeenBC = agFALSE; /* put all devices belonging to the onePortContext back to the free link */ tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_DEQUEUE_THIS(&(onePortContext->MainLink)); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->FreeLink), &(tdsaPortContext->FreeLink)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } else { TI_DBG1(("ossaHwCB: NULL osDATA: wrong\n")); } TI_DBG6(("ossaHwCB: PORT_INVALID end\n")); break; } #endif /* REMOVED */ case OSSA_HW_EVENT_BROADCAST_CHANGE: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG1(("ossaHwCB: BROADCAST_CHANGE from PhyID %d\n", PhyID)); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with BROADCAST_CHANGE\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'q', "Y2"); return; } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_BROADCAST_CHANGE; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); TI_DBG4(("ossaHwCB: calling saHwEventAck\n")); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in 
saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'r', "Y2"); return; } if (tIsSPC12SATA(agRoot)) { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE received for SATA Controller\n")); break; } /* * incremental discovery is to be tested and debugged further */ /* just for testing discovery abort */ #ifdef FDS_DM_NO if (agPortContext == agNULL) { /* this case happens when broadcase is received first before the link up */ TI_DBG2(("ossaHwCB: agPortContext is NULL. Do nothing.\n")); } else if ( agPortContext->osData != agNULL) { dmRoot = &(tdsaAllShared->dmRoot); onePortContext = (tdsaPortContext_t *)agPortContext->osData; dmPortContext = &(onePortContext->dmPortContext); dmQueryDiscovery(dmRoot, dmPortContext); // dmDiscover(dmRoot, dmPortContext, DM_DISCOVERY_OPTION_ABORT); #if 1 if (onePortContext->DMDiscoveryState == dmDiscInProgress) { dmDiscover(dmRoot, dmPortContext, DM_DISCOVERY_OPTION_ABORT); } #endif /* 1 */ TI_DBG2(("ossaHwCB: portcontext pid %d\n", onePortContext->id)); if (onePortContext->DMDiscoveryState == dmDiscCompleted || onePortContext->DMDiscoveryState == dmDiscAborted || onePortContext->DMDiscoveryState == dmDiscAbortInvalid ) { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE; calling dmNotifyBC and does incremental discovery\n")); dmNotifyBC(dmRoot, dmPortContext, OSSA_HW_EVENT_BROADCAST_CHANGE); dmDiscover(dmRoot, dmPortContext, DM_DISCOVERY_OPTION_INCREMENTAL_START); } else { TI_DBG2(("ossaHwCB: pid %d BROADCAST_CHANGE; updating SeenBC. calling dmNotifyBC\n", onePortContext->id)); dmNotifyBC(dmRoot, dmPortContext, OSSA_HW_EVENT_BROADCAST_CHANGE); } } else { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE NULL osDATA wrong !!! \n")); } #endif /* FDS_DM_NO */ #ifdef FDS_DM if (agPortContext == agNULL) { /* this case happens when broadcase is received first before the link up */ TI_DBG2(("ossaHwCB: agPortContext is NULL. 
Do nothing.\n")); } else if ( agPortContext->osData != agNULL) { dmRoot = &(tdsaAllShared->dmRoot); onePortContext = (tdsaPortContext_t *)agPortContext->osData; dmPortContext = &(onePortContext->dmPortContext); dmQueryDiscovery(dmRoot, dmPortContext); TI_DBG2(("ossaHwCB: portcontext pid %d\n", onePortContext->id)); if (onePortContext->DMDiscoveryState == dmDiscCompleted || onePortContext->DMDiscoveryState == dmDiscAborted || onePortContext->DMDiscoveryState == dmDiscAbortInvalid ) { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE; calling dmNotifyBC and does incremental discovery, pid %d\n", onePortContext->id)); onePortContext->DiscoveryState = ITD_DSTATE_STARTED; dmNotifyBC(dmRoot, dmPortContext, OSSA_HW_EVENT_BROADCAST_CHANGE); dmDiscover(dmRoot, dmPortContext, DM_DISCOVERY_OPTION_INCREMENTAL_START); } else if (onePortContext->DMDiscoveryState == dmDiscFailed ) { TI_DBG1(("ossaHwCB: dmDiscFailed; pid %d BROADCAST_CHANGE; updating SeenBC. calling dmNotifyBC\n", onePortContext->id)); onePortContext->DiscFailNSeenBC = agTRUE; dmNotifyBC(dmRoot, dmPortContext, OSSA_HW_EVENT_BROADCAST_CHANGE); } else { TI_DBG2(("ossaHwCB: pid %d BROADCAST_CHANGE; updating SeenBC. calling dmNotifyBC\n", onePortContext->id)); dmNotifyBC(dmRoot, dmPortContext, OSSA_HW_EVENT_BROADCAST_CHANGE); } } else { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE NULL osDATA wrong !!! \n")); } #endif /* FDS_DM */ #ifdef FDS_DM_WORKED if (agPortContext == agNULL) { /* this case happens when broadcase is received first before the link up */ TI_DBG2(("ossaHwCB: agPortContext is NULL. 
Do nothing.\n")); } else if ( agPortContext->osData != agNULL) { onePortContext = (tdsaPortContext_t *)agPortContext->osData; TI_DBG2(("ossaHwCB: calling dmNotifyBC\n")); dmRoot = &(tdsaAllShared->dmRoot); dmPortContext = &(onePortContext->dmPortContext); dmNotifyBC(dmRoot, dmPortContext, OSSA_HW_EVENT_BROADCAST_CHANGE); } #endif /* FDS_DM_WORKED */ #ifndef FDS_DM #ifdef INITIATOR_DRIVER if (agPortContext == agNULL) { /* this case happens when broadcase is received first before the link up */ TI_DBG2(("ossaHwCB: agPortContext is NULL. Do nothing.\n")); } else if ( agPortContext->osData != agNULL) { onePortContext = (tdsaPortContext_t *)agPortContext->osData; TI_DBG2(("ossaHwCB: portcontext pid %d\n", onePortContext->id)); if (onePortContext->DiscoveryState == ITD_DSTATE_COMPLETED) { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE; does incremental discovery\n")); onePortContext->DiscoveryState = ITD_DSTATE_NOT_STARTED; onePortContext->discoveryOptions = AG_SA_DISCOVERY_OPTION_INCREMENTAL_START; /* processed broadcast change */ onePortContext->discovery.SeenBC = agFALSE; #ifdef TD_DISCOVER if (tdsaAllShared->ResetInDiscovery != 0 && onePortContext->discovery.ResetTriggerred == agTRUE) { TI_DBG2(("ossaHwCB: tdsaBCTimer\n")); tdsaBCTimer(tiRoot, onePortContext); } else { tdsaDiscover( tiRoot, onePortContext, TDSA_DISCOVERY_TYPE_SAS, TDSA_DISCOVERY_OPTION_INCREMENTAL_START ); } #else saDiscover(agRoot, agPortContext, AG_SA_DISCOVERY_TYPE_SAS, onePortContext->discoveryOptions); #endif } else { TI_DBG2(("ossaHwCB: pid %d BROADCAST_CHANGE; updating SeenBC. Do nothing.\n", onePortContext->id)); onePortContext->discovery.SeenBC = agTRUE; } } else { TI_DBG1(("ossaHwCB: BROADCAST_CHANGE NULL osDATA wrong !!! \n")); } #endif #endif /* ifndef FDS_DM */ break; } case OSSA_HW_EVENT_PORT_RECOVERY_TIMER_TMO: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); /* 1. tear town the portcontext just like link down last phy down 2. 
ack port state must be invalid */ TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); if (PortState == OSSA_PORT_VALID) { TI_DBG1(("ossaHwCB: Wrong port state\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 's', "Y2"); return; } TD_ASSERT(agPortContext, "agPortContext"); if ( agPortContext->osData == agNULL) { /* if */ /* PortContext must exit at this point */ TI_DBG1(("ossaHwCB: NULL portalcontext. Error. Can't be NULL\n")); } else { onePortContext = (tdsaPortContext_t *)agPortContext->osData; onePortContext->valid = agFALSE; TI_DBG1(("ossaHwCB: tiPortStopped pid %d\n", onePortContext->id)); #ifdef INITIATOR_DRIVER /* notifying link down (all links belonging to a port are down) */ ostiPortEvent( tiRoot, tiPortStopped, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef TARGET_DRIVER ostiPortEvent( tiRoot, tiPortLinkDown, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef INITIATOR_DRIVER tdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif #ifdef TARGET_DRIVER ttdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif /* find a PhyID and reset for portContext in tdssSASShared */ for(i=0;iPhyIDList[i] == agTRUE) { tdsaAllShared->Ports[i].portContext = agNULL; } } /* portcontext is removed from MainLink to FreeLink in tdssReportRemovals or ossaDeregisterDeviceHandleCB */ } break; } case OSSA_HW_EVENT_PORT_RESET_TIMER_TMO: { /* clean up */ PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PORT_RESET_TIMER_TMO\n")); if (PortState == OSSA_PORT_VALID) { TI_DBG1(("ossaHwCB: Wrong port state\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 't', "Y2"); return; } if (agPortContext == agNULL) { TI_DBG1(("ossaHwCB: agPortContext is NULL, error\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'u', "Y2"); return; } if ( agPortContext->osData == agNULL) { /* if */ /* PortContext must exit at this point */ TI_DBG1(("ossaHwCB: NULL portalcontext. Error. 
Can't be NULL\n")); } else { onePortContext = (tdsaPortContext_t *)agPortContext->osData; onePortContext->valid = agFALSE; TI_DBG1(("ossaHwCB: pid %d tiPortStopped\n", onePortContext->id)); #ifdef INITIATOR_DRIVER /* notifying link down (all links belonging to a port are down) */ ostiPortEvent( tiRoot, tiPortStopped, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef TARGET_DRIVER ostiPortEvent( tiRoot, tiPortLinkDown, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif #ifdef INITIATOR_DRIVER tdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif #ifdef TARGET_DRIVER ttdssReportRemovals(agRoot, onePortContext, agFALSE ); #endif /* find a PhyID and reset for portContext in tdssSASShared */ for(i=0;iPhyIDList[i] == agTRUE) { tdsaAllShared->Ports[i].portContext = agNULL; } } /* portcontext is removed from MainLink to FreeLink in tdssReportRemovals or ossaDeregisterDeviceHandleCB */ } break; } case OSSA_HW_EVENT_PORT_RESET_COMPLETE: { #ifdef INITIATOR_DRIVER tiIORequest_t *currentTaskTag = agNULL; #endif #ifdef REMOVED smRoot_t *smRoot = &(tdsaAllShared->smRoot); #endif PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); IDframe = (agsaSASIdentify_t *)eventParm3; /* completes for Lun Reset and Target reset for directly attached SATA */ /* completes for Target reset for directly attached SAS */ TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PORT_RESET_COMPLETE, phyID %d\n", PhyID)); /* error check */ if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'v', "Y2"); return; } if (agPortContext == agNULL) { TI_DBG1(("ossaHwCB: agPortContext null, wrong\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'w', "Y2"); return; } if ( agPortContext->osData == agNULL) { TI_DBG1(("ossaHwCB: agPortContext->osData null, wrong\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'x', "Y2"); return; } /* find a corresponding portcontext */ onePortContext = (tdsaPortContext_t *)agPortContext->osData; if 
(onePortContext == agNULL) { TI_DBG1(("ossaHwCB: oneportContext is NULL; wrong??????\n")); } else { TI_DBG1(("ossaHwCB: oneportContext %p pid %d\n", onePortContext, onePortContext->id)); onePortContext->valid = agTRUE; #ifdef INITIATOR_DRIVER #ifdef REMOVED if (tdsaAllShared->ResetInDiscovery != 0) { DeviceListList = tdsaAllShared->MainDeviceList.flink; while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); if (oneDeviceData->tdPortContext != onePortContext) { DeviceListList = DeviceListList->flink; } else { found = agTRUE; break; } } /* while */ if (found == agTRUE) { /* applied to only SATA devices */ if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) { #ifdef FDS_SM tdIDStart(tiRoot, agRoot, smRoot, oneDeviceData, onePortContext); #else tdssRetrySATAID(tiRoot, oneDeviceData); #endif } } else { TI_DBG1(("ossaHwCB: no onedevicedata found!\n")); } } #endif /* completed TM */ DeviceListList = tdsaAllShared->MainDeviceList.flink; while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); if ( oneDeviceData == agNULL) { TI_DBG1(("ossaHwCB: oneDeviceData is NULL!!!\n")); return; } if ( (oneDeviceData->tdPortContext == onePortContext) && (oneDeviceData->directlyAttached == agTRUE) && (oneDeviceData->phyID == PhyID) ) { TI_DBG1(("ossaHwCB: found the onePortContext and oneDeviceData!!\n")); currentTaskTag = (tiIORequest_t *)oneDeviceData->agDeviceResetContext.osData; if (currentTaskTag != agNULL ) { /* applied to only SATA devices */ if (DEVICE_IS_SATA_DEVICE(oneDeviceData)) { tdIORequestBody_t *SMTMtdIORequestBody = agNULL; SMTMtdIORequestBody = (tdIORequestBody_t *)currentTaskTag->tdData; if (SMTMtdIORequestBody != agNULL) { /* free the SMTMtdIORequestBody memory allocated in tiINITaskManagement function */ ostiFreeMemory( tiRoot, 
SMTMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else { TI_DBG1(("ossaHwCB: SATA device but SMTMtdIORequestBody is NULL!!!\n")); } } /* set device state to DS_OPERATIONAL */ saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->agDevHandle, SA_DS_OPERATIONAL ); /* notify OS layer to complete the TMF IO */ ostiInitiatorEvent(tiRoot, agNULL, agNULL, tiIntrEventTypeTaskManagement, tiTMOK, currentTaskTag ); } else { TI_DBG1(("ossaHwCB: currentTaskTag is NULL!!!\n")); } break; } else { DeviceListList = DeviceListList->flink; } } #endif } break; } case OSSA_HW_EVENT_BROADCAST_ASYNCH_EVENT: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_BROADCAST_ASYNCH_EVENT\n")); if (tIsSPC12SATA(agRoot)) { TI_DBG1(("ossaHwCB: BROADCAST_ASYNCH_EVENT received for SATA Controller\n")); break; } if (agPortContext == agNULL) { TI_DBG1(("ossaHwCB: Error!!! agPortContext is NULL %d\n", PhyID)); smTraceFuncExit(hpDBG_VERY_LOUD, 'y', "Y2"); return; } onePortContext = (tdsaPortContext_t *)agPortContext->osData; if (onePortContext == agNULL) { TI_DBG1(("ossaHwCB: Error!!! onePortContext is NULL %d\n", PhyID)); smTraceFuncExit(hpDBG_VERY_LOUD, 'z', "Y2"); return; } if (onePortContext->tiPortalContext != agNULL) { #if 0 ostiInitiatorEvent( tiRoot, onePortContext->tiPortalContext, agNULL, tiIntrEventTypeDeviceChange, OSSA_HW_EVENT_BROADCAST_ASYNCH_EVENT, agNULL ); #endif } else { TI_DBG1(("ossaHwCB: Error!!! onePortContext->tiPortalContext is NULL\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'A', "Y2"); return; } break; } case OSSA_HW_EVENT_PORT_RECOVER: { PhyID = TD_GET_PHY_ID(eventParm1); if (agPortContext == agNULL) { TI_DBG1(("ossaHwCB: Error!!! 
agPortContext is NULL %d\n", PhyID)); smTraceFuncExit(hpDBG_VERY_LOUD, 'B', "Y2"); return; } LinkRate = TD_GET_LINK_RATE(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agDevHandle = agNULL; IDframe = (agsaSASIdentify_t *)eventParm3; /* 1. this is like link up 2. handle the phyID 3. no trigger discovery (broadcast change will do this later) port state must be valid */ TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PORT_RECOVER, phyID %d\n", PhyID)); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'C', "Y2"); return; } if ( agPortContext->osData == agNULL) { /* if */ /* PortContext must exit at this point */ TI_DBG1(("ossaHwCB: NULL portalcontext. Error. Can't be NULL\n")); } else { onePortContext = (tdsaPortContext_t *)agPortContext->osData; TI_DBG2(("ossaHwCB: pid %d\n", onePortContext->id)); onePortContext->PhyIDList[PhyID] = agTRUE; onePortContext->valid = agTRUE; tdsaAllShared->Ports[PhyID].portContext = onePortContext; onePortContext->tiPortalContext = tdsaAllShared->Ports[PhyID].tiPortalContext; onePortContext->PortRecoverPhyID = PhyID; if (LinkRate == 0x01) { onePortContext->LinkRate = SAS_CONNECTION_RATE_1_5G; } else if (LinkRate == 0x02) { onePortContext->LinkRate = SAS_CONNECTION_RATE_3_0G; } else if (LinkRate == 0x04) { onePortContext->LinkRate = SAS_CONNECTION_RATE_6_0G; } else /* (LinkRate == 0x08) */ { onePortContext->LinkRate = SAS_CONNECTION_RATE_12_0G; } if (SA_IDFRM_GET_DEVICETTYPE(&onePortContext->sasIDframe) == SAS_END_DEVICE && SA_IDFRM_IS_SSP_TARGET(&onePortContext->sasIDframe) ) { TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PORT_RECOVER, sending spinup on phyID %d\n", PhyID)); for (i=0;iTransient == agTRUE && onePortContext->RegisteredDevNums == 0) { TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PORT_RECOVER transient period")); if (SA_IDFRM_GET_DEVICETTYPE(IDframe) != SAS_NO_DEVICE) { #ifdef INITIATOR_DRIVER agSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(IDframe); agSASSubID.sasAddressLo = 
SA_IDFRM_GET_SAS_ADDRESSLO(IDframe); agSASSubID.initiator_ssp_stp_smp = IDframe->initiator_ssp_stp_smp; agSASSubID.target_ssp_stp_smp = IDframe->target_ssp_stp_smp; tdssAddSASToSharedcontext( onePortContext, agRoot, agDevHandle, /* agNULL */ &agSASSubID, agTRUE, (bit8)PhyID, TD_OPERATION_INITIATOR ); #endif } onePortContext->Transient = agFALSE; } } break; } case OSSA_HW_EVENT_BROADCAST_SES: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: BROADCAST_SES from PhyID %d; to be tested\n", PhyID)); if (tIsSPC12SATA(agRoot)) { TI_DBG1(("ossaHwCB: BROADCAST_SES received for SATA Controller\n")); break; } if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with BROADCAST_SES\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'D', "Y2"); return; } /* let os layer read payload */ break; } case OSSA_HW_EVENT_BROADCAST_EXP: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: BROADCAST_EXP from PhyID %d; to be tested\n", PhyID)); if (tIsSPC12SATA(agRoot)) { TI_DBG1(("ossaHwCB: BROADCAST_EXP received for SATA Controller\n")); break; } if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with BROADCAST_EXP\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'E', "Y2"); return; } /* to-do: let os layer read payload */ break; } case OSSA_HW_EVENT_HARD_RESET_RECEIVED: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: HARD_RESET_RECEIVED from PhyID %d\n", PhyID)); if (PortState == OSSA_PORT_VALID && tiIS_SPC(agRoot)) { TI_DBG1(("ossaHwCB: calling saPortControl and OSSA_PORT_VALID\n")); saPortControl(agRoot, agNULL, 0, agPortContext, AGSA_PORT_HARD_RESET, 0,0); } else if (PortState == OSSA_PORT_3RDPARTY_RESET && (tIsSPCV12or6G(agRoot)) ) { TI_DBG1(("ossaHwCB: calling saPortControl and OSSA_PORT_3RDPARTY_RESET\n")); saPortControl(agRoot, agNULL, 0, agPortContext, AGSA_PORT_HARD_RESET, 0,0); } else /* PortState == 
OSSA_PORT_INVALID */ { TI_DBG1(("ossaHwCB: Error. Port state is invalid\n")); #ifdef REMOVED TI_DBG1(("ossaHwCB: calling saLocalPhyControl on phyID %d\n", PhyID)); saLocalPhyControl(agRoot, agNULL, 0, PhyID, AGSA_PHY_LINK_RESET, agNULL); #endif } break; } case OSSA_HW_EVENT_MALFUNCTION: { #ifdef TD_DEBUG_ENABLE agsaFatalErrorInfo_t *FatalError = (agsaFatalErrorInfo_t *)eventParm2; #endif TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_MALFUNCTION \n")); TI_DBG1(("ossaHwCB: errorInfo0 %8X errorInfo1 %8X\n", FatalError->errorInfo0, FatalError->errorInfo1)); TI_DBG1(("ossaHwCB: errorInfo2 %8X errorInfo3 %8X\n", FatalError->errorInfo2, FatalError->errorInfo3)); TI_DBG1(("ossaHwCB: regDumpBusBaseNum0 %8X regDumpOffset0 %8X regDumpLen0 %8X\n", FatalError->regDumpBusBaseNum0, FatalError->regDumpOffset0, FatalError->regDumpLen0)); TI_DBG1(("ossaHwCB: regDumpBusBaseNum1 %8X regDumpOffset1 %8X regDumpLen1 %8X\n", FatalError->regDumpBusBaseNum1, FatalError->regDumpOffset1, FatalError->regDumpLen1)); if (eventParm1 == agTRUE) { TI_DBG1(("ossaHwCB: fatal error\n")); /* port panic */ ostiPortEvent ( tiRoot, tiPortPanic, 0, agNULL ); } else { TI_DBG1(("ossaHwCB: non-fatal error \n")); } break; } case OSSA_HW_EVENT_ID_FRAME_TIMEOUT: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_ID_FRAME_TIMEOUT from PhyID %d\n", PhyID)); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_ID_FRAME_TIMEOUT\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'F', "Y2"); return; } break; } case OSSA_HW_EVENT_PHY_ERR_INVALID_DWORD: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_INVALID_DWORD\n")); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_PHY_ERR_INVALID_DWORD\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'G', "Y2"); return; } if 
(agPhyErrCountersPage != agNULL) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_INVALID_DWORD from PhyID %d\n", PhyID)); TI_DBG1(("ossaHwCB: invalidDword %d\n", agPhyErrCountersPage->invalidDword)); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_INVALID_DWORD: Error!!! eventParm2 is NULL\n")); } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_PHY_ERR_INVALID_DWORD; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'H', "Y2"); return; } break; } case OSSA_HW_EVENT_PHY_ERR_DISPARITY_ERROR: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_DISPARITY_ERROR\n")); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_PHY_ERR_DISPARITY_ERROR\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'I', "Y2"); return; } if (agPhyErrCountersPage != agNULL) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_DISPARITY_ERROR from PhyID %d\n", PhyID)); TI_DBG1(("ossaHwCB: runningDisparityError %d\n", agPhyErrCountersPage->runningDisparityError)); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_DISPARITY_ERROR: Error!!! 
eventParm2 is NULL\n")); } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_PHY_ERR_DISPARITY_ERROR; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'J', "Y2"); return; } break; } case OSSA_HW_EVENT_PHY_ERR_CODE_VIOLATION: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_CODE_VIOLATION\n")); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_PHY_ERR_CODE_VIOLATION\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'K', "Y2"); return; } if (agPhyErrCountersPage != agNULL) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_CODE_VIOLATION from PhyID %d\n", PhyID)); TI_DBG1(("ossaHwCB: codeViolation %d\n", agPhyErrCountersPage->codeViolation)); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_CODE_VIOLATION: Error!!! 
eventParm2 is NULL\n")); } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_PHY_ERR_CODE_VIOLATION; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'L', "Y2"); return; } break; } #ifdef REMOVED case OSSA_HW_EVENT_LINK_ERR_CODE_VIOLATION1: { PhyID = eventParm1 & 0xFF; agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; if (agPhyErrCountersPage != agNULL) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_LINK_ERR_CODE_VIOLATION1 from PhyID %d\n", PhyID)); TI_DBG1(("ossaHwCB: invalidDword %d\n", agPhyErrCountersPage->invalidDword)); TI_DBG1(("ossaHwCB: runningDisparityError %d\n", agPhyErrCountersPage->runningDisparityError)); TI_DBG1(("ossaHwCB: codeViolation %d\n", agPhyErrCountersPage->codeViolation)); TI_DBG1(("ossaHwCB: lostOfDwordSynch %d\n", agPhyErrCountersPage->lossOfDwordSynch)); TI_DBG1(("ossaHwCB: phyResetProblem %d\n", agPhyErrCountersPage->phyResetProblem)); TI_DBG1(("ossaHwCB: inboundCRCError %d\n", agPhyErrCountersPage->inboundCRCError)); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_LINK_ERR_CODE_VIOLATION1: Error!!! 
eventParm2 is NULL\n")); } break; } #endif /* REMOVED */ case OSSA_HW_EVENT_PHY_ERR_LOSS_OF_DWORD_SYNCH: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_LOSS_OF_DWORD_SYNCH\n")); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_PHY_ERR_LOSS_OF_DWORD_SYNCH\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'M', "Y2"); return; } if (agPhyErrCountersPage != agNULL) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_LOSS_OF_DWORD_SYNCH from PhyID %d\n", PhyID)); TI_DBG1(("ossaHwCB: lostOfDwordSynch %d\n", agPhyErrCountersPage->lossOfDwordSynch)); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_LOSS_OF_DWORD_SYNCH: Error!!! eventParm2 is NULL\n")); } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_PHY_ERR_LOSS_OF_DWORD_SYNCH; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'N', "Y2"); return; } break; } case OSSA_HW_EVENT_PHY_ERR_PHY_RESET_FAILED: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); agPhyErrCountersPage = (agsaPhyErrCountersPage_t *)eventParm2; TI_DBG2(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_PHY_RESET_FAILED\n")); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: Wrong port state with OSSA_HW_EVENT_PHY_ERR_PHY_RESET_FAILED\n")); smTraceFuncExit(hpDBG_VERY_LOUD, 'O', "Y2"); return; } if (agPhyErrCountersPage != agNULL) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_PHY_RESET_FAILED from PhyID %d\n", PhyID)); TI_DBG1(("ossaHwCB: phyResetProblem %d\n", agPhyErrCountersPage->phyResetProblem)); } else { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_ERR_PHY_RESET_FAILED: Error!!! 
eventParm2 is NULL\n")); } /* saHwEventAck() */ eventSource.agPortContext = agPortContext; eventSource.event = OSSA_HW_EVENT_PHY_ERR_PHY_RESET_FAILED; /* phy ID */ eventSource.param = PhyID; HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, &eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaHwCB: failing in saHwEventAck; status %d\n", HwAckSatus)); smTraceFuncExit(hpDBG_VERY_LOUD, 'P', "Y2"); return; } break; } // #ifdef INITIATOR_DRIVER case OSSA_HW_EVENT_ENCRYPTION: { pEncryptCBData = (agsaHWEventEncrypt_t *) eventParm2; TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_ENCRYPTION: encryptOperation 0x%x\n",pEncryptCBData->encryptOperation)); TI_DBG1(("ossaHwCB: event 0x%x eventParm1 0x%x eventParm2 %p eventParm3 %p\n",event,eventParm1,eventParm2,eventParm3)); /* * All events and status need to be translated from * SAS specific values to TISA specific values. This * is effectively a NOP, but the OS layer won't want to * look for SAS values. */ if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_KEK_UPDATE_AND_STORE) { TI_DBG1(("ossaHwCB: OSSA_HW_ENCRYPT_KEK_UPDATE_AND_STORE\n")); encryptEventData.encryptEvent = tiEncryptKekStore; } else if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_KEK_UPDATE) { TI_DBG1(("ossaHwCB:OSSA_HW_ENCRYPT_KEK_UPDATE \n")); encryptEventData.encryptEvent = tiEncryptKekAdd; } else if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_KEK_INVALIDTE) { TI_DBG1(("ossaHwCB:OSSA_HW_ENCRYPT_KEK_INVALIDTE \n")); /* none */ } else if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_DEK_UPDATE) { TI_DBG1(("ossaHwCB: OSSA_HW_ENCRYPT_DEK_UPDATE\n")); encryptEventData.encryptEvent = tiEncryptDekAdd; } else if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_DEK_INVALIDTE) { TI_DBG1(("ossaHwCB: OSSA_HW_ENCRYPT_DEK_INVALIDTE\n")); encryptEventData.encryptEvent = tiEncryptDekInvalidate; } else if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_OPERATOR_MANAGEMENT) { TI_DBG1(("ossaHwCB: 
OSSA_HW_ENCRYPT_OPERATOR_MANAGEMENT\n")); encryptEventData.encryptEvent = tiEncryptOperatorManagement; } else if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_TEST_EXECUTE) { TI_DBG1(("ossaHwCB: OSSA_HW_ENCRYPT_TEST_EXECUTE\n")); encryptEventData.encryptEvent = tiEncryptSelfTest; encryptEventData.subEvent = pEncryptCBData->eq; } else { TI_DBG1(("ossaHwCB: unknown encryptOperation 0x%x\n",pEncryptCBData->encryptOperation)); } if (pEncryptCBData->status != OSSA_SUCCESS) { encryptStatus = tiError; /* prints out status and error qualifier */ TI_DBG1(("ossaHwCB: encrypt response status 0x%x error qualifier 0x%x\n", pEncryptCBData->status, pEncryptCBData->eq)); } else { encryptStatus = tiSuccess; } if (pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_KEK_UPDATE_AND_STORE || pEncryptCBData->encryptOperation == OSSA_HW_ENCRYPT_KEK_UPDATE ) { /* returning new KEK index */ encryptEventData.pData = pEncryptCBData->handle; } else { /* returning current KEK index or DEK index */ encryptEventData.pData = pEncryptCBData->param; } ostiPortEvent(tiRoot, tiEncryptOperation, encryptStatus, &encryptEventData); break; } case OSSA_HW_EVENT_SECURITY_MODE: { securitySetModeStatus = eventParm1; pEncryptInfo = (agsaEncryptInfo_t *) eventParm2; TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_SECURITY_MODE\n")); if (securitySetModeStatus == OSSA_SUCCESS) { securityModeStatus = tiSuccess; } else { securityModeStatus = tiError; } encryptEventData.encryptEvent = tiEncryptSetMode; /* process status to fill in subevent */ /* See PM 4.26.12.6 */ TI_DBG1(("ossaHwCB: pEncryptInfo->status 0x%x\n", pEncryptInfo->status)); if ( pEncryptInfo->status == OSSA_SUCCESS) { encryptEventData.subEvent = tiNVRAMSuccess; } else if (pEncryptInfo->status == 0x24) { encryptEventData.subEvent = tiNVRAMNotFound; } else if (pEncryptInfo->status == 0x05 || pEncryptInfo->status == 0x20 || pEncryptInfo->status == 0x21) { encryptEventData.subEvent = tiNVRAMAccessTimeout; } else { encryptEventData.subEvent = tiNVRAMWriteFail; } 
encryptEventData.pData = agNULL; ostiPortEvent(tiRoot, tiEncryptOperation, securityModeStatus, &encryptEventData); break; } case OSSA_HW_EVENT_MODE: { pModeEvent = (agsaHWEventMode_t *) eventParm2; pModePage = (bit32 *) pModeEvent->modePage; TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_MODE modePageOperation 0x%x status 0x%x modePageLen 0x%x\n", pModeEvent->modePageOperation, pModeEvent->status, pModeEvent->modePageLen)); if (pModeEvent->modePageOperation == agsaModePageSet) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_MODE page code 0x%x error qualifier 0x%x\n", (eventParm1 & 0xFF), (eventParm1 >> 16))); ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, eventParm2); } else if (pModeEvent->modePageOperation == agsaModePageGet) { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_MODE error qualifier 0x%x\n", eventParm1)); switch ((*pModePage) & 0xFF) { case AGSA_ENCRYPTION_GENERAL_CONFIG_PAGE: TI_DBG1(("ossaHwCB: AGSA_ENCRYPTION_GENERAL_CONFIG_PAGE 0x%x %p\n", pModeEvent->status,eventParm2)); TI_DBG1(("ossaHwCB:modePageOperation 0x%x status 0x%x modePageLen 0x%x modePage %p context %p\n", pModeEvent->modePageOperation, pModeEvent->status, pModeEvent->modePageLen, pModeEvent->modePage, pModeEvent->context)); ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, eventParm2); break; case AGSA_ENCRYPTION_DEK_CONFIG_PAGE: TI_DBG1(("ossaHwCB: AGSA_ENCRYPTION_DEK_CONFIG_PAGE 0x%x %p\n", pModeEvent->status,eventParm2)); ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, eventParm2); break; case AGSA_ENCRYPTION_HMAC_CONFIG_PAGE: TI_DBG1(("ossaHwCB: AGSA_ENCRYPTION_HMAC_CONFIG_PAGE 0x%x %p\n", pModeEvent->status,eventParm2)); ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, eventParm2); break; case AGSA_ENCRYPTION_CONTROL_PARM_PAGE: TI_DBG1(("ossaHwCB: AGSA_ENCRYPTION_CONTROL_PARM_PAGE 0x%x %p\n", pModeEvent->status,eventParm2)); /* * This page is directly related to tiCOMEncryptGetInfo() and * will be translated into a tiEncrytOperation for the OS layer. 
*/ /* Fill out tiEncryptInfo_t */ securityMode = *pModePage & 0x0F00 >> 8; cipherMode = *pModePage & 0xF000 >> 12; if (securityMode == agsaEncryptSMA) { encryptInfo.securityCipherMode = TI_ENCRYPT_SEC_MODE_A; } else if (securityMode == agsaEncryptSMB) { encryptInfo.securityCipherMode = TI_ENCRYPT_SEC_MODE_B; } else { encryptInfo.securityCipherMode = TI_ENCRYPT_SEC_MODE_FACT_INIT; } if (cipherMode == agsaEncryptCipherModeECB) { encryptInfo.securityCipherMode |= TI_ENCRYPT_ATTRIB_CIPHER_ECB; } if (cipherMode == agsaEncryptCipherModeXTS) { encryptInfo.securityCipherMode |= TI_ENCRYPT_ATTRIB_CIPHER_XTS; } /* How will subEvents be tracked? */ encryptInfo.status = 0; encryptInfo.sectorSize[0] = 512; /* DIF is allowed on 512 BPS SATA drives */ encryptInfo.sectorSize[1] = 520; encryptInfo.sectorSize[2] = 528; encryptInfo.sectorSize[3] = 4104; encryptInfo.sectorSize[4] = 4168; encryptInfo.sectorSize[5] = 4232; encryptEventData.encryptEvent = tiEncryptGetInfo; encryptEventData.subEvent = 0; encryptEventData.pData = &encryptInfo; ostiPortEvent(tiRoot, tiEncryptOperation, pModeEvent->status, &encryptEventData); break; case AGSA_SAS_PROTOCOL_TIMER_CONFIG_PAGE: TI_DBG1(("ossaHwCB: AGSA_SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x%x %p\n", pModeEvent->status,eventParm2)); #ifdef IOCTL_INTERRUPT_TIME_CONFIG ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, eventParm2 ); #endif /* IOCTL_INTERRUPT_TIME_CONFIG */ /*ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, &encryptEventData);*/ break; case AGSA_INTERRUPT_CONFIGURATION_PAGE: TI_DBG1(("ossaHwCB: AGSA_INTERRUPT_CONFIGURATION_PAGE 0x%x %p\n", pModeEvent->status,eventParm2)); #ifdef IOCTL_INTERRUPT_TIME_CONFIG ostiPortEvent(tiRoot, tiModePageOperation, pModeEvent->status, eventParm2 ); #endif /* IOCTL_INTERRUPT_TIME_CONFIG */ break; default: TI_DBG1(("ossaHwCB: Unknown Mode Event %x\n", *pModePage)); break; } } else { TI_DBG1(("ossaHwCB: Unknown modePageOperation %x\n", pModeEvent->modePageOperation)); } break; } // 
#endif /* INITIATOR_DRIVER */ #ifdef REMOVED case OSSA_HW_EVENT_PHY_UNRECOVERABLE_ERROR: { PhyID = TD_GET_PHY_ID(eventParm1); PortState = TD_GET_PORT_STATE(eventParm1); TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_PHY_UNRECOVERABLE_ERROR\n")); if (PortState == OSSA_PORT_INVALID) { TI_DBG1(("ossaHwCB: INVALID port state\n")); } else { TI_DBG1(("ossaHwCB: VALID port state\n")); } break; } #endif /* REMOVED */ case OSSA_HW_EVENT_OPEN_RETRY_BACKOFF_THR_ADJUSTED: { TI_DBG1(("ossaHwCB: OSSA_HW_EVENT_OPEN_RETRY_BACKOFF_THR_ADJUSTED\n")); break; } default: { TI_DBG1(("ossaHwCB: default error (0x%X)!!!!!\n",event)); break; } } smTraceFuncExit(hpDBG_VERY_LOUD, 'R', "Y2"); return; } osGLOBAL void ossaPortControlCB( agsaRoot_t *agRoot, agsaContext_t *agContext, agsaPortContext_t *agPortContext, bit32 portOperation, bit32 status) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaPortContext_t *onePortContext = agNULL; TI_DBG6(("ossaPortControlCB: start\n")); smTraceFuncEnter(hpDBG_VERY_LOUD,"Y3"); if (portOperation == AGSA_PORT_SET_SMP_PHY_WIDTH) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_PORT_SET_SMP_PHY_WIDTH\n")); } else if (portOperation == AGSA_PORT_SET_PORT_RECOVERY_TIME) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_PORT_SET_PORT_RECOVERY_TIME\n")); } else if (portOperation == AGSA_PORT_IO_ABORT) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_PORT_IO_ABORT\n")); /* code is here because disocvery failed deregister all targets. Then, later call discovery if broacast is seen in ossaDeregisterDeviceHandleCB. 
*/ onePortContext = (tdsaPortContext_t *)agPortContext->osData; if (onePortContext == agNULL) { TI_DBG1(("ossaPortControlCB: onePortContext is NULL\n")); return; } /* qqqqq deregister all devices */ tdsaDeregisterDevicesInPort(tiRoot, onePortContext); } else if (portOperation == AGSA_PORT_SET_PORT_RESET_TIME) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_PORT_SET_PORT_RESET_TIME\n")); } else if (portOperation == AGSA_PORT_HARD_RESET) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_PORT_HARD_RESET\n")); } else if (portOperation == AGSA_PORT_CLEAN_UP) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_PORT_CLEAN_UP\n")); } else if (portOperation == AGSA_STOP_PORT_RECOVERY_TIMER) { TI_DBG1(("ossaPortControlCB: portOperation AGSA_STOP_PORT_RECOVERY_TIMER\n")); } else { TI_DBG1(("ossaPortControlCB: undefined portOperation %d\n", portOperation)); } TI_DBG1(("ossaPortControlCB: status %d\n", status)); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y3"); return; } /***************************************************************************** *! \brief ossaHwRegRead * * Purpose: This routine is called to read a 32-bit value from the PCI * registers of the controller * * \param agRoot: Pointer to chip/driver Instance. * \param regOffset: Byte offset to chip register from which to read a 32-bit * value. * * \return: 32-bit value. * * \note - The scope is shared target and initiator. * *****************************************************************************/ FORCEINLINE bit32 ossaHwRegRead(agsaRoot_t *agRoot, bit32 regOffset ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData); bit32 return_value; return_value = ostiChipReadBit32 ( osData->tiRoot, regOffset ); if( agNULL != agRoot->sdkData ) { smTrace(hpDBG_REGISTERS,"RR",regOffset); /* TP:RR regOffset */ smTrace(hpDBG_REGISTERS,"RV",return_value); /* TP:RV value read */ } return(return_value); } /***************************************************************************** *! 
\brief ossaHwRegWrite * * Purpose: This routine is called to write a 32-bit value to the PCI * registers of the controller. * * \param agRoot: Pointer to chip/driver Instance. * \param regOffset: Byte offset to chip register to which chipIOValue is * written. * \param regValue: 32-bit value to write at chipIOOffset in host byte order. * * \return: None. * * \note - The scope is shared target and initiator. * *****************************************************************************/ FORCEINLINE void ossaHwRegWrite(agsaRoot_t *agRoot, bit32 regOffset, bit32 regValue ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData); if( agNULL != agRoot->sdkData ) { smTrace(hpDBG_REGISTERS,"RW",regOffset); /* TP:RW regOffset */ smTrace(hpDBG_REGISTERS,"VW",regValue); /* TP:VW value written */ } ostiChipWriteBit32 ( osData->tiRoot, regOffset, regValue ); return; } /***************************************************************************** *! \brief ossaHwRegReadExt * * Purpose: This routine is called to read a 32-bit value from a bus-specific * mapped registers of the controller * * \param agRoot: Pointer to chip/driver Instance. * \param regOffset: Byte offset to chip register from which to read a 32-bit * value. * * \return: 32-bit value. * * \note - The scope is shared target and initiator. 
* *****************************************************************************/ FORCEINLINE bit32 ossaHwRegReadExt( agsaRoot_t *agRoot, bit32 busBaseNumber, bit32 regOffset ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData); bit32 return_value; return_value = ostiChipReadBit32Ext( osData->tiRoot, busBaseNumber, regOffset ); /* TI_DBG4(("#_R: 0x%x:0x%x=0x%x\n",busBaseNumber,regOffset,return_value)); */ if( agNULL != agRoot->sdkData ) { smTrace(hpDBG_REGISTERS,"EB",busBaseNumber); /* TP:EB EX read busBaseNumber */ smTrace(hpDBG_REGISTERS,"EO",regOffset); /* TP:EO regOffset */ smTrace(hpDBG_REGISTERS,"ER",return_value); /* TP:ER value read */ } return(return_value); } void ossaPCI_TRIGGER(agsaRoot_t *agRoot ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData); ostiPCI_TRIGGER(osData->tiRoot); } /***************************************************************************** *! \brief ossaHwRegWriteExt * * Purpose: This routine is called to write a 32-bit value to a bus specific * mapped registers of the controller. * * \param agRoot: Pointer to chip/driver Instance. * \param regOffset: Byte offset to chip register to which chipIOValue is * written. * \param regValue: 32-bit value to write at chipIOOffset in host byte order. * * \return: None. * * \note - The scope is shared target and initiator. 
* *****************************************************************************/ FORCEINLINE void ossaHwRegWriteExt( agsaRoot_t *agRoot, bit32 busBaseNumber, bit32 regOffset, bit32 regValue ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData); ostiChipWriteBit32Ext( osData->tiRoot, busBaseNumber, regOffset, regValue ); /* TI_DBG4(("#_W: 0x%x:0x%x=0x%x\n",busBaseNumber,regOffset,regValue)); */ if( agNULL != agRoot->sdkData ) { smTrace(hpDBG_REGISTERS,"Eb",busBaseNumber); /* TP:Eb Ex Write busBaseNumber */ smTrace(hpDBG_REGISTERS,"Eo",regOffset); /* TP:Eo regOffset */ smTrace(hpDBG_REGISTERS,"Ew",regValue); /* TP:Ew value written regValue*/ } return; } osGLOBAL bit32 ossaHwRegReadConfig32( agsaRoot_t *agRoot, bit32 regOffset ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData); bit32 to_ret; to_ret= ostiChipConfigReadBit32( osData->tiRoot, regOffset); TI_DBG4(("ossaHwRegReadConfig32: regOffset 0x%x returns 0x%x\n",regOffset,to_ret)); return(to_ret); } #ifdef TD_INT_COALESCE void ossaIntCoalesceInitCB( agsaRoot_t *agRoot, agsaIntCoalesceContext_t *agIntCoContext, bit32 status ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)osData->tdsaAllShared; tiIntCoalesceContext_t *tiIntCoalesceCxt; tdsaIntCoalesceContext_t *tdsaIntCoalCxt; tdsaIntCoalesceContext_t *tdsaIntCoalCxtHead - = (tdsaIntCoalesceContext_t *)tdsaAllShared->IntCoalesce;; + = (tdsaIntCoalesceContext_t *)tdsaAllShared->IntCoalesce; bit32 tiStatus; TI_DBG2(("ossaIntCoalesceInitCB: start\n")); tdsaIntCoalCxt = (tdsaIntCoalesceContext_t *)agIntCoContext->osData; tiIntCoalesceCxt = tdsaIntCoalCxt->tiIntCoalesceCxt; switch (status) { case AGSA_RC_SUCCESS: tiStatus = tiSuccess; break; case AGSA_RC_BUSY: tiStatus = tiBusy; break; case AGSA_RC_FAILURE: tiStatus = tiError; break; default: TI_DBG1(("ossaIntCoalesceInitCB: unknown status %d\n", status)); tiStatus = tiError; 
break; } TI_DBG2(("ossaIntCoalesceInitCB: status %d\n", tiStatus)); /* enqueue tdsaIntCoalCxt to freelink */ tdsaIntCoalCxt->tiIntCoalesceCxt = agNULL; TI_DBG2(("ossaIntCoalesceInitCB: id %d\n", tdsaIntCoalCxt->id)); tdsaSingleThreadedEnter(tiRoot, TD_INTCOAL_LOCK); TDLIST_DEQUEUE_THIS(&(tdsaIntCoalCxt->MainLink)); TDLIST_ENQUEUE_AT_TAIL(&(tdsaIntCoalCxt->FreeLink), &(tdsaIntCoalCxtHead->FreeLink)); tdsaSingleThreadedLeave(tiRoot, TD_INTCOAL_LOCK); #ifdef OS_INT_COALESCE ostiInitiatorIntCoalesceInitCB(tiRoot, tiIntCoalesceCxt, tiStatus); #endif TI_DBG2(("ossaIntCoalesceInitCB: return end\n")); return; } #endif /* TD_INT_COALESCE */ /*****************************************************************************/ /*! \brief ossaSingleThreadedEnter * * * Purpose: This routine is called to ensure that only a single thread of * the given port instance executes code in the region protected by * this function. * * * \param agRoot: Pointer to chip/driver Instance. * \param syncLockId to be explained. * * * \return None. * * \note - The scope is shared target and initiator. * */ /*****************************************************************************/ FORCEINLINE void ossaSingleThreadedEnter( agsaRoot_t *agRoot, bit32 syncLockId ) { tdsaRootOsData_t *pOsData = agNULL; tiRoot_t *ptiRoot = agNULL; tdsaContext_t *tdsaAllShared = agNULL; TD_ASSERT(agRoot, "agRoot"); pOsData = (tdsaRootOsData_t *) (agRoot->osData); TD_ASSERT(pOsData, "pOsData"); ptiRoot = pOsData->tiRoot; TD_ASSERT(ptiRoot, "ptiRoot"); tdsaAllShared = (tdsaContext_t *)pOsData->tdsaAllShared; TD_ASSERT(tdsaAllShared, "tdsaAllShared"); ostiSingleThreadedEnter(ptiRoot, syncLockId + tdsaAllShared->MaxNumOSLocks); return; } /*****************************************************************************/ /*! 
\brief ossaSingleThreadedLeave * * * Purpose: This routine is called to leave a critical region of code * previously protected by a call to osSingleThreadedEnter() * * * \param agRoot: Pointer to chip/driver Instance. * \param syncLockId to be explained. * * * \return None. * * \note - The scope is shared target and initiator. * */ /*****************************************************************************/ FORCEINLINE void ossaSingleThreadedLeave( agsaRoot_t *agRoot, bit32 syncLockId ) { tdsaRootOsData_t *pOsData = agNULL; tiRoot_t *ptiRoot = agNULL; tdsaContext_t *tdsaAllShared = agNULL; TD_ASSERT(agRoot, "agRoot"); pOsData = (tdsaRootOsData_t *) (agRoot->osData); TD_ASSERT(pOsData, "pOsData"); ptiRoot = pOsData->tiRoot; TD_ASSERT(ptiRoot, "ptiRoot"); tdsaAllShared = (tdsaContext_t *)pOsData->tdsaAllShared; TD_ASSERT(tdsaAllShared, "tdsaAllShared"); ostiSingleThreadedLeave(ptiRoot, syncLockId + tdsaAllShared->MaxNumOSLocks); return; } #ifdef PERF_COUNT osGLOBAL void ossaEnter(agsaRoot_t *agRoot, int io) { ostiEnter(((tdsaRootOsData_t*)(agRoot->osData))->tiRoot, 0, io); return; } osGLOBAL void ossaLeave(agsaRoot_t *agRoot, int io) { ostiLeave(((tdsaRootOsData_t*)(agRoot->osData))->tiRoot, 0, io); return; } #endif osGLOBAL void ossaSSPIoctlCompleted( agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, bit32 agIOStatus, bit32 agIOInfoLen, void *agParam, bit16 sspTag, bit32 agOtherInfo ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdIORequestBody_t *tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData; agsaSASRequestBody_t *agSASRequestBody = agNULL; agsaSSPInitiatorRequest_t *agSSPFrame = agNULL; bit8 scsiOpcode = 0; agSASRequestBody = &(tdIORequestBody->transport.SAS.agSASRequestBody); agSSPFrame = &(agSASRequestBody->sspInitiatorReq); scsiOpcode = agSSPFrame->sspCmdIU.cdb[0]; TI_DBG2(("ossaSSPIoctlCompleted: start\n")); if (agIOStatus == OSSA_SUCCESS) { TI_DBG2(("ossaSSPIoctlCompleted: 
Success status\n")); } else { TI_DBG1(("ossaSSPIoctlCompleted: Status 0x%x\n", agIOStatus)); } switch(scsiOpcode) { case REPORT_LUN_OPCODE: ostiNumOfLUNIOCTLRsp(tiRoot, agIOStatus); break; default: TI_DBG1(("ossaSSPIoctlCompleted: Unsupported SCSI command Response 0x%x\n",scsiOpcode)); break; } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yi"); return; } osGLOBAL void ossaSMPIoctlCompleted( agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, bit32 agIOStatus, bit32 agIOInfoLen, agsaFrameHandle_t agFrameHandle ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; TI_DBG2(("ossaSMPIoctlCompleted: start\n")); if (agIOStatus == OSSA_SUCCESS) { TI_DBG2(("ossaSMPIoctlCompleted: Success status\n")); } else { TI_DBG1(("ossaSMPIoctlCompleted: Status 0x%x\n", agIOStatus)); } ostiSendSMPIOCTLRsp(tiRoot, agIOStatus); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yi"); return; } /*****************************************************************************/ /*! \brief ossaSMPCompleted * * * Purpose: This routine is called by lower layer to indicate the completion of * SMP request * * \param agRoot: Pointer to chip/driver Instance. * \param agIORequest Pointer to SMP request handle * \param agIOStatus Status * \param agFrameHeader:Pointer to SMP frame header. * \param agIOInfoLen IO information length assoicated with the IO * \param agFrameHandle A Handle used to refer to the response frame * * * \return None. * * \note - The scope is shared target and initiator. 
 *          For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
osGLOBAL void ossaSMPCompleted(
                agsaRoot_t            *agRoot,
                agsaIORequest_t       *agIORequest,
                bit32                 agIOStatus,
                bit32                 agIOInfoLen,
                agsaFrameHandle_t     agFrameHandle
                )
{
#ifdef PASSTHROUGH
  tdsaRootOsData_t        *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t                *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdPassthroughCmndBody_t *tdPTCmndBody = (tdPassthroughCmndBody_t *)agIORequest->osData;
  bit32                   tiStatus = tiPassthroughError;
  /* VLAs sized by the LL-reported response length */
  bit8                    SMPframe[agIOInfoLen + sizeof(agsaSMPFrameHeader_t)];
  bit8                    SMPpayload[agIOInfoLen];
  TI_DBG2(("ossaSMPCompleted: start and passthrough\n"));
#else /* not PASSTHROUGH */
  /* agIORequest->osData carries the TD-layer SMP request body we submitted */
  tdssSMPRequestBody_t *pSMPRequestBody = (tdssSMPRequestBody_t *) agIORequest->osData;
  TI_DBG4(("ossaSMPCompleted: start\n"));
#endif /* end not PASSTHROUGH */
  TDSA_OUT_ENTER((tiRoot_t *)((tdsaRootOsData_t *)agRoot->osData)->tiRoot);
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Y4");
#ifdef PASSTHROUGH
  if (tdPTCmndBody == agNULL)
  {
    TI_DBG1(("ossaSMPCompleted: tdPTCmndBody is NULL \n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y4");
    goto ext;
  }
  if (tdPTCmndBody->EventCB == agNULL)
  {
    TI_DBG1(("ossaSMPCompleted: tdPTCmndBody->EventCB is NULL \n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "Y4");
    goto ext;
  }
  /* map LL status onto the passthrough status codes */
  if (agIOStatus == OSSA_IO_SUCCESS)
  {
    tiStatus = tiPassthroughSuccess;
  }
  else if (agIOStatus == OSSA_IO_ABORTED)
  {
    tiStatus = tiPassthroughAborted;
  }
  else
  {
    tiStatus = tiPassthroughError;
  }
  osti_memset(SMPpayload, 0, agIOInfoLen);
  osti_memset(SMPframe, 0, agIOInfoLen + sizeof(agsaSMPFrameHeader_t));
  /* combine the header and payload */
  saFrameReadBlock(agRoot, agFrameHandle, 0, &SMPpayload, agIOInfoLen);
  /* NOTE(review): agFrameHeader is not declared in this (PASSTHROUGH-only)
     branch — this code cannot compile as-is if PASSTHROUGH is defined;
     confirm before enabling that build option */
  osti_memcpy(SMPframe, agFrameHeader, sizeof(agsaSMPFrameHeader_t));
  osti_memcpy(SMPframe+sizeof(agsaSMPFrameHeader_t), SMPpayload, agIOInfoLen);
  tdPTCmndBody->EventCB(tiRoot,
                        tdPTCmndBody->tiPassthroughRequest,
                        tiStatus,
                        SMPframe,
                        agIOInfoLen + sizeof(agsaSMPFrameHeader_t)
                        );
#else /* not PASSTHROUGH */
  /* At initiator, passing SMP to TD layer, itdssSMPCompleted(), which does nothing.
     At target, passing SMP to TD layer, ttdsaSMPCompleted() */
  /* how to use agFrameHandle, when saFrameReadBlock() is used */
  /* SPC can't be SMP target */
  TI_DBG4(("ossaSMPCompleted: start\n"));
  if (pSMPRequestBody == agNULL)
  {
    TI_DBG1(("ossaSMPCompleted: pSMPRequestBody is NULL \n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "Y4");
    goto ext;
  }
  if (pSMPRequestBody->SMPCompletionFunc == agNULL)
  {
    TI_DBG1(("ossaSMPCompleted: pSMPRequestBody->SMPCompletionFunc is NULL \n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "Y4");
    goto ext;
  }
#ifdef TD_INTERNAL_DEBUG
  /* debugging */
  TI_DBG4(("ossaSMPCompleted: agIOrequest %p\n", agIORequest->osData));
  TI_DBG4(("ossaSMPCompleted: sizeof(tdIORequestBody_t) %d 0x%x\n", sizeof(tdIORequestBody_t), sizeof(tdIORequestBody_t)));
  TI_DBG4(("ossaSMPCompleted: SMPRequestbody %p\n", pSMPRequestBody));
  TI_DBG4(("ossaSMPCompleted: calling callback fn\n"));
  TI_DBG4(("ossaSMPCompleted: callback fn %p\n",pSMPRequestBody->SMPCompletionFunc));
#endif /* TD_INTERNAL_DEBUG */
  /* if initiator, calling itdssSMPCompleted() in itdcb.c
     if target, calling ttdsaSMPCompleted() in ttdsmp.c */
  pSMPRequestBody->SMPCompletionFunc(
                                     agRoot,
                                     agIORequest,
                                     agIOStatus,
                                     agIOInfoLen,
                                     agFrameHandle
                                     );
#endif /* Not PASSTHROUGH */
  smTraceFuncExit(hpDBG_VERY_LOUD, 'e', "Y4");
ext:
  TDSA_OUT_LEAVE((tiRoot_t *)((tdsaRootOsData_t *)agRoot->osData)->tiRoot);
  return;
}

/* Stub: SMP-request-received notification; only records a trace entry/exit. */
osGLOBAL void ossaSMPReqReceived(
                agsaRoot_t            *agRoot,
                agsaDevHandle_t       *agDevHandle,
                agsaFrameHandle_t     agFrameHandle,
                bit32                 agIOInfoLen,
                bit32                 phyId
                )
{
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Y5");
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y5");
  return;
}

/*****************************************************************************/
/*!
  \brief ossaSMPCAMCompleted
 *
 *
 *  Purpose: This routine is called by lower layer to indicate the completion of
 *           SMP request
 *
 *  \param agRoot:        Pointer to chip/driver Instance.
 *  \param agIORequest    Pointer to SMP request handle
 *  \param agIOStatus     Status
 *  \param agIOInfoLen    IO information length associated with the IO
 *  \param agFrameHandle  A Handle used to refer to the response frame
 *
 *
 *  \return None.
 *
 *  \note - The scope is shared target and initiator.
 *          For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
osGLOBAL void ossaSMPCAMCompleted(
                agsaRoot_t            *agRoot,
                agsaIORequest_t       *agIORequest,
                bit32                 agIOStatus,
                bit32                 agIOInfoLen,
                agsaFrameHandle_t     agFrameHandle
                )
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdIORequestBody_t *tdSMPRequestBody = agNULL;
  bit32             context = osData->IntContext;
  tiSMPStatus_t     status;
  bit32             PhysUpper32;
  bit32             PhysLower32;
  bit32             memAllocStatus;
  void              *osMemHandle;
  bit32             *SMPpayload;

  TI_DBG2(("ossaSMPCAMCompleted: start\n"));
  TI_DBG2(("ossaSMPCAMCompleted: agIOInfoLen %d\n", agIOInfoLen));
  if (!agIORequest->osData)
  {
    TD_ASSERT((0), "ossaSMPCAMCompleted agIORequest->osData");
    goto ext;
  }
  tdSMPRequestBody = (tdIORequestBody_t *)agIORequest->osData;
  if (tdSMPRequestBody->tiIORequest->osData == agNULL)
  {
    TI_DBG1(("ossaSMPCAMCompleted: tdIORequestBody->tiIORequest->osData is null, wrong\n"));
    goto ext;
  }
  /* allocating agIORequest for SMP Payload itself */
  memAllocStatus = ostiAllocMemory(
                                   tiRoot,
                                   &osMemHandle,
                                   (void **)&SMPpayload,
                                   &PhysUpper32,
                                   &PhysLower32,
                                   8,
                                   agIOInfoLen,
                                   agTRUE
                                   );
  if (memAllocStatus != tiSuccess)
  {
    /* let os process IO */
    TI_DBG1(("ossaSMPCAMCompleted: ostiAllocMemory failed...\n"));
    goto ext;
  }
  if (SMPpayload == agNULL)
  {
    TI_DBG1(("ossaSMPCAMCompleted: ostiAllocMemory returned NULL SMPpayload\n"));
    goto ext;
  }
  if (agIOStatus == OSSA_IO_SUCCESS)
  {
    /* copy the SMP response frame into the freshly allocated payload buffer */
    TI_DBG1(("ossaSMPCAMCompleted: Success status\n"));
    osti_memset(SMPpayload, 0, agIOInfoLen);
    TI_DBG1(("ossaSMPCAMCompleted: after memset\n"));
    saFrameReadBlock(agRoot, agFrameHandle, 0, SMPpayload, agIOInfoLen);
    TI_DBG1(("ossaSMPCAMCompleted: after read \n"));
    status = tiSMPSuccess;
  }
  else if (agIOStatus == OSSA_IO_ABORTED)
  {
    TI_DBG1(("ossaSMPCAMCompleted: SMP Aborted status\n"));
    status = tiSMPAborted;
    TI_DBG1(("ossaSMPCAMCompleted: failed status=%d\n", status));
    //failed to send smp command, we need to free the memory
    /* NOTE(review): SMPpayload's backing memory is freed here but the (now
       dangling) pointer is still passed to ostiInitiatorSMPCompleted() below —
       looks like a use-after-free on this path; confirm ownership contract */
    ostiFreeMemory(
                   tiRoot,
                   osMemHandle,
                   agIOInfoLen
                   );
  }
  else
  {
    TI_DBG1(("ossaSMPCAMCompleted: SMP failed status\n"));
    status = tiSMPFailed;
    TI_DBG1(("ossaSMPCAMCompleted: failed status=%d\n", status));
    //failed to send smp command, we need to free the memory
    /* NOTE(review): same dangling-SMPpayload concern as the aborted path above */
    ostiFreeMemory(
                   tiRoot,
                   osMemHandle,
                   agIOInfoLen
                   );
  }
  /* hand the response (and, on success, ownership of SMPpayload) to the OS layer */
  ostiInitiatorSMPCompleted(tiRoot,
                            tdSMPRequestBody->tiIORequest,
                            status,
                            agIOInfoLen,
                            SMPpayload,
                            context
                            );
ext:
  TDSA_OUT_LEAVE((tiRoot_t*)((tdsaRootOsData_t*)agRoot->osData)->tiRoot);
  return;
}

#ifdef REMOVED
#ifdef TARGET_DRIVER
/*****************************************************************************/
/*! \brief ossaSMPReqReceived
 *
 *
 *  Purpose: This routine is called by lower layer to indicate the reception of
 *           SMP request
 *
 *  \param agRoot:        Pointer to chip/driver Instance.
 *  \param agDevHandle    Pointer to the device handle of the device
 *  \param agFrameHandle  A Handle used to refer to the response frame
 *
 *
 *  \return None.
 *
 *  \note - The scope is target only
 *          For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
osGLOBAL void ossaSMPReqReceived(
                agsaRoot_t            *agRoot,
                agsaDevHandle_t       *agDevHandle,
                agsaFrameHandle_t     agFrameHandle,
                bit32                 agFrameLength,
                bit32                 phyId
                )
{
  bit8                 smpHeader[4];
  agsaSMPFrameHeader_t *agFrameHeader;
#ifdef PASSTHROUGH
  /* call the registered function(parameter in tiTGTPassthroughCmndRegister() by target */
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;
  ttdsaTgt_t       *Target = (ttdsaTgt_t *)osData->ttdsaTgt;
  /* NOTE(review): agIOInfoLen is not a parameter of this function — this
     (REMOVED+PASSTHROUGH) branch cannot compile as-is */
  bit8             SMPframe[agIOInfoLen + sizeof(agsaSMPFrameHeader_t)];
  bit8             SMPpayload[agIOInfoLen];
  TI_DBG2(("ossaSMPReqReceived: start and passthrough\n"));
  osti_memset(SMPpayload, 0, agIOInfoLen);
  osti_memset(SMPframe, 0, agIOInfoLen + sizeof(agsaSMPFrameHeader_t));
  /* combine smp header and payload */
  saFrameReadBlock(agRoot, agFrameHandle, 0, &SMPpayload, agIOInfoLen);
  osti_memcpy(SMPframe, agFrameHeader, sizeof(agsaSMPFrameHeader_t));
  osti_memcpy(SMPframe+sizeof(agsaSMPFrameHeader_t), SMPpayload, agIOInfoLen);
  Target->PasthroughCB(
                       tiRoot,
                       tiSASATA,
                       tiSMP,
                       tiSMPResponse,
                       SMPframe,
                       agIOInfoLen + sizeof(agsaSMPFrameHeader_t),
                       phyId
                       );
#else
  /* agDevHandle_t->osData points to tdssDeviceData_t */
  tdsaDeviceData_t *pDeviceData = (tdsaDeviceData_t *) agDevHandle->osData;
  saFrameReadBlock(agRoot, agFrameHandle, 0, smpHeader, 4);
  agFrameHeader = (agsaSMPFrameHeader_t *)smpHeader;
  TI_DBG4(("ossaSMPReqReceived: start\n"));
  /* tdtypes.h, calling ttdsaSMPReqReceived in ttdsmp.c */
  pDeviceData->pJumpTable->pSMPReqReceived (
                                            agRoot,
                                            agDevHandle,
                                            agFrameHeader,
                                            agFrameHandle,
                                            agFrameLength,
                                            phyId
                                            );
#endif
  return;
}
#endif
#endif

/*****************************************************************************/
/*!
  \brief ossaSSPCompleted
 *
 *
 *  Purpose: This routine is called by lower layer to indicate the completion of
 *           SSP request
 *
 *  \param agRoot:        Pointer to chip/driver Instance.
 *  \param agIORequest    Pointer to SMP request handle
 *  \param agIOStatus     Status
 *  \param agIOInfoLen    IO information length associated with the IO
 *  \param agFrameHandle  A Handle used to refer to the response frame
 *
 *
 *  \return None.
 *
 *  \note - The scope is shared target and initiator.
 *          For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
FORCEINLINE void
ossaSSPCompleted(
                agsaRoot_t            *agRoot,
                agsaIORequest_t       *agIORequest,
                bit32                 agIOStatus,
                bit32                 agIOInfoLen,
                void                  *agParam,
                bit16                 sspTag,
                bit32                 agOtherInfo
                )
{
  tdIORequestBody_t *pIORequestBody;
#ifdef TD_DEBUG_ENABLE
  tiDeviceHandle_t  *tiDeviceHandle = agNULL;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
#endif
  TDSA_OUT_ENTER((tiRoot_t*)((tdsaRootOsData_t*)agRoot->osData)->tiRoot);
  smTraceFuncEnter(hpDBG_VERY_LOUD,"2L");
  if(!agIORequest->osData)
  {
    TD_ASSERT((0), "ossaSSPCompleted agIORequest->osData");
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "2L");
    goto ext;
  }
  pIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  TI_DBG4(("ossaSSPCompleted: start\n"));
  if (pIORequestBody == agNULL)
  {
    TI_DBG1(("ossaSSPCompleted: pIORequestBody is NULL \n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "2L");
    goto ext;
  }
  if (pIORequestBody->IOCompletionFunc == agNULL)
  {
#ifdef TD_DEBUG_ENABLE
    tiDeviceHandle = pIORequestBody->tiDevHandle;
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
#endif
    TI_DBG1(("ossaSSPCompleted: IOCompletionFunc is NULL \n"));
    /* NOTE(review): oneDeviceData is only declared/assigned under
       TD_DEBUG_ENABLE; this line relies on TI_DBG1 compiling away when
       debugging is disabled — confirm */
    TI_DBG1(("ossaSSPCompleted: did %d \n", oneDeviceData->id));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "2L");
    goto ext;
  }
  /* if initiator, calling itdssIOCompleted() in itdcb.c
     if initiator, calling itdssTaskCompleted in itdcb.c
     if target, calling ttdsaIOCompleted() in ttdio.c */
  pIORequestBody->IOCompletionFunc(
                                   agRoot,
                                   agIORequest,
                                   agIOStatus,
                                   agIOInfoLen,
                                   agParam,
                                   agOtherInfo
                                   );
  smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "2L");
ext:
  TDSA_OUT_LEAVE((tiRoot_t*)((tdsaRootOsData_t*)agRoot->osData)->tiRoot);
  return;
}

#ifdef FAST_IO_TEST
/*
 * Fast-path SSP completion: cbArg carries an agsaFastCBBuf_t with the OS
 * callback and its context. Decodes the SSP response IU on success (response /
 * sense data handling, spin-up notification) and maps LL error statuses onto
 * tiIO* codes before invoking the callback.
 */
GLOBAL void ossaFastSSPCompleted(
                agsaRoot_t            *agRoot,
                agsaIORequest_t       *cbArg,
                bit32                 agIOStatus,
                bit32                 agIOInfoLen,
                void                  *agParam,
                bit16                 sspTag,
                bit32                 agOtherInfo
                )
{
  agsaFastCBBuf_t           *safb = (agsaFastCBBuf_t*)cbArg;
  tdsaRootOsData_t          *osData = (tdsaRootOsData_t*)agRoot->osData;
  tiRoot_t                  *tiRoot = (tiRoot_t*)osData->tiRoot;
  bit32                     scsi_status;
  bit32                     data_status;
  bit32                     respLen;
  bit8                      respData[128];
  bit32                     senseLen;
  agsaSSPResponseInfoUnit_t agSSPRespIU;

  TDSA_OUT_ENTER((tiRoot_t*)((tdsaRootOsData_t*)agRoot->osData)->tiRoot);
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Y6");
  TI_DBG4(("ossaSSPCompleted: start\n"));
  if (safb->cb == agNULL || safb->cbArg == agNULL)
  {
    TI_DBG1(("ossaFastSSPCompleted: pIORequestBody is NULL \n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y6");
    TD_ASSERT((0), "");
    goto ext;
  }
  switch (agIOStatus)
  {
  case OSSA_IO_SUCCESS:
    /* ~ itdssIOSuccessHandler */
    if ((agIOInfoLen < sizeof(agsaSSPResponseInfoUnit_t)))
    {
      /* no response IU present — plain success */
      ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, OSSA_IO_SUCCESS, 0);
      break;
    }
    /* reads agsaSSPResponseInfoUnit_t */
    saFrameReadBlock(agRoot, agParam, 0, &agSSPRespIU, sizeof(agsaSSPResponseInfoUnit_t));
    data_status = SA_SSPRESP_GET_DATAPRES(&agSSPRespIU);
    scsi_status = agSSPRespIU.status;
    TI_DBG1(("itdssIOSuccessHandler: scsi_status %d\n", scsi_status));
    /* endianess is invovled here */
    senseLen = SA_SSPRESP_GET_SENSEDATALEN(&agSSPRespIU);
    respLen = SA_SSPRESP_GET_RESPONSEDATALEN(&agSSPRespIU);
    TI_DBG2(("itdssIOSuccessHandler: scsi status=0x%x, senselen=0x%x resplen "
             "0x%x\n", scsi_status, senseLen, respLen));
    if (agIOInfoLen < sizeof(agsaSSPResponseInfoUnit_t) + senseLen + respLen)
    {
      /* frame shorter than the IU claims — treat as failed */
      ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOFailed, tiDetailOtherError);
      break;
    }
    /* reads response data */
    saFrameReadBlock(agRoot, agParam, sizeof(agsaSSPResponseInfoUnit_t), respData, respLen);
    /* reads sense data */
    saFrameReadBlock(agRoot, agParam, sizeof(agsaSSPResponseInfoUnit_t) + respLen, safb->pSenseData, senseLen);
    if (data_status == 0)
    {
      /* NO_DATA */
      TI_DBG2(("ossaFastSSPCompleted: no data\n"));
      ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOSuccess, scsi_status);
      break;
    }
    if (data_status == 1)
    {
      /* RESPONSE_DATA */
      TI_DBG1(("ossaFastSSPCompleted: response data \n"));
      ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOSuccess, 0);
      break;
    }
    if (data_status == 2)
    {
      tiSenseData_t senseData;
      /* SENSE_DATA */
      TI_DBG2(("itdssIOSuccessHandler: sense data \n"));
      senseData.senseData = safb->pSenseData;
      senseData.senseLen = MIN(*(safb->senseLen), senseLen);
      /* when ASC = 0x04 - Log Unit Not Ready,
         and ASCQ = 0x11 - Enable Spinup Required:
         call saLocalPhyControl to notify spinup */
      if (((char*)safb->pSenseData)[12] == 0x04 && ((char*)safb->pSenseData)[13] == 0x11)
      {
        int i;
        TI_DBG2(("ossaFastSSPCompleted: sending notfify spinup\n"));
        if (((tdsaDeviceData_t*)safb->oneDeviceData)->directlyAttached == agTRUE)
        {
          for (i = 0; i < TD_MAX_NUM_NOTIFY_SPINUP; i++)
          {
            saLocalPhyControl(agRoot, agNULL, 0, ((tdsaDeviceData_t*)safb->oneDeviceData)->phyID, AGSA_PHY_NOTIFY_ENABLE_SPINUP, agNULL);
          }
        }
      }
      if (*(safb->senseLen) > senseData.senseLen)
        *(safb->senseLen) = senseData.senseLen;
      // memcpy((void *)safb->pSenseData, senseData.senseData, safb->senseLen);
      ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOSuccess, scsi_status);
      break;
    }
    if (data_status == 3)
    {
      /* RESERVED */
      TI_DBG1(("ossaFastSSPCompleted: reserved wrong!!!\n"));
      ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOFailed, scsi_status);
      break;
    }
    break;
#ifdef REMOVED
  case OSSA_IO_OVERFLOW:
    ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOOverRun, agIOInfoLen);
    break;
#endif /* REMOVED */
  case OSSA_IO_UNDERFLOW:
    ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOUnderRun, agIOInfoLen);
    break;
  case OSSA_IO_ABORTED:
    ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOFailed, tiDetailAborted);
    break;
  case OSSA_IO_ABORT_RESET:
    ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOFailed, tiDetailAbortReset);
    break;
  case OSSA_IO_NO_DEVICE:
    ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOFailed, tiDetailNoLogin);
    break;
  case OSSA_IO_DS_NON_OPERATIONAL:
  {
    tdsaDeviceData_t *oneDeviceData;
    /* try to bring the device back to the operational state, then report
       the failure via the default case */
    oneDeviceData = (tdsaDeviceData_t*)safb->oneDeviceData;
    if (oneDeviceData->valid == agTRUE && oneDeviceData->registered == agTRUE &&
        oneDeviceData->tdPortContext != agNULL)
    {
      saSetDeviceState(oneDeviceData->agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->agDevHandle, SA_DS_OPERATIONAL);
    }
    /* fall through */
  }
  default:
    ((ostiFastSSPCb_t)safb->cb)(tiRoot, safb->cbArg, tiIOFailed, tiDetailOtherError);
    break;
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "Y6");
ext:
  TDSA_OUT_LEAVE((tiRoot_t*)((tdsaRootOsData_t*)agRoot->osData)->tiRoot);
  return;
} /* ossaFastSSPCompleted */
#endif

/*****************************************************************************/
/*! \brief ossaSSPReqReceived
 *
 *
 *  Purpose: This routine is called by lower layer to indicate the reception of
 *           SMP request
 *
 *  \param agRoot:         Pointer to chip/driver Instance.
 *  \param agDevHandle     Pointer to the device handle of the device
 *  \param agFrameHandle   A Handle used to refer to the response frame
 *  \param agInitiatorTag  the initiator tag
 *  \param agFrameType     SSP frame type
 *
 *  \return none.
 *
 *  \note - The scope is target only
 *          For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
osGLOBAL void ossaSSPReqReceived(
                agsaRoot_t            *agRoot,
                agsaDevHandle_t       *agDevHandle,
                agsaFrameHandle_t     agFrameHandle,
                bit16                 agInitiatorTag,
                bit32                 parameter,
                bit32                 agFrameLen
                )
{
  /* at target only uses jumptable, not callback */
  /* agDevHandle_t->osData points to tdssDeviceData_t */
  tdsaDeviceData_t *pDeviceData = (tdsaDeviceData_t *) agDevHandle->osData;
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Y7");
  /* tdtypes.h, calling ttdsaSSPReqReceived() in ttdio.c */
  pDeviceData->pJumpTable->pSSPReqReceived (
                                            agRoot,
                                            agDevHandle,
                                            agFrameHandle,
                                            agInitiatorTag,
                                            parameter,
                                            agFrameLen
                                            );
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y7");
  return;
}

/*****************************************************************************/
/*! \brief ossaStallThread
 *
 *
 *  Purpose: This routine is called to stall this thread for a number of
 *           microseconds.
 *
 *
 *  \param agRoot:       Pointer to chip/driver Instance.
 *  \param microseconds: Micro second to stall.
 *
 *
 *  \return None.
 *
 *  \note - The scope is shared target and initiator.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaStallThread(agsaRoot_t *agRoot, bit32 microseconds )
{
  tdsaRootOsData_t *pOsData = (tdsaRootOsData_t *) (agRoot->osData);
  /* delegate the busy-wait to the OS layer */
  ostiStallThread (
                   pOsData->tiRoot,
                   microseconds
                   );
  return;
}

/*****************************************************************************
*!
  \brief ossaSSPEvent
 *
 *   This routine is called to notify the OS Layer of an event associated with
 *   SAS port or SAS device
 *
 *  \param agRoot:        Handles for this instance of SAS/SATA hardware
 *  \param agIORequest    Pointer to IO request
 *  \param event:         event type
 *  \param agIOInfoLen:   not in use
 *  \param agFrameHandle: not in use
 *
 *  \return: none
 *
 *****************************************************************************/
/* in case of CMD ACK_NAK timeout, send query task */
osGLOBAL void ossaSSPEvent(
                           agsaRoot_t        *agRoot,
                           agsaIORequest_t   *agIORequest,
                           agsaPortContext_t *agPortContext,
                           agsaDevHandle_t   *agDevHandle,
                           bit32             event,
                           bit16             sspTag,
                           bit32             agIOInfoLen,
                           void              *agParam
                           )
{
#ifdef INITIATOR_DRIVER
  tdsaRootOsData_t         *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t                 *tiRoot = (tiRoot_t *)osData->tiRoot;
  /* bit32                    intContext = osData->IntContext; */
  void                     *osMemHandle;
  tdIORequestBody_t        *TMtdIORequestBody;
  bit32                    PhysUpper32;
  bit32                    PhysLower32;
  bit32                    memAllocStatus;
  bit32                    agRequestType;
  agsaIORequest_t          *agTMIORequest = agNULL;  /* task management itself */
  agsaSASRequestBody_t     *agSASRequestBody = agNULL;
  agsaSSPScsiTaskMgntReq_t *agSSPTaskMgntRequest;
  bit32                    saStatus;
  bit32                    agIORequestType;  /* type of IO recevied */
  tiIORequest_t            *taskTag;  /* being task managed one */
  tdIORequestBody_t        *tdIORequestBody;
#endif
#ifdef REMOVED
  tiDeviceHandle_t         *tiDeviceHandle;
  tdsaDeviceData_t         *oneDeviceData = agNULL;
  tdIORequestBody_t        *tdAbortIORequestBody;
#endif
  agsaDifDetails_t         agDifDetails;
  bit8                     framePayload[256];
#ifdef REMOVED
  bit16                    frameOffset = 0;
#endif
  bit16                    frameLen = 0;

  TI_DBG6(("ossaSSPEvent: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Y9");
  if (event == OSSA_IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT ||
      event == OSSA_IO_XFER_ERROR_BREAK ||
      event == OSSA_IO_XFER_ERROR_PHY_NOT_READY )
  {
    /* IO being task managed(the original IO) depending on event */
#ifdef INITIATOR_DRIVER
    tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
    taskTag = tdIORequestBody->tiIORequest;
#endif
#ifdef REMOVED
    tiDeviceHandle = tdIORequestBody->tiDevHandle;
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
#endif
#ifdef INITIATOR_DRIVER
    agIORequestType = tdIORequestBody->agRequestType;
    /* error checking; only command is expected here */
    if (agIORequestType == AGSA_REQ_TYPE_UNKNOWN)
    {
      TI_DBG1(("ossaSSPEvent: incorrect frame 0x%x. Should be command\n", agIORequestType));
      smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Y9");
      return;
    }
    /* Allocate memory for query task management */
    memAllocStatus = ostiAllocMemory(
                                     tiRoot,
                                     &osMemHandle,
                                     (void **)&TMtdIORequestBody,
                                     &PhysUpper32,
                                     &PhysLower32,
                                     8,
                                     sizeof(tdIORequestBody_t),
                                     agTRUE
                                     );
    if (memAllocStatus != tiSuccess)
    {
      /* let os process IO */
      TI_DBG1(("ossaSSPEvent: ostiAllocMemory failed...\n"));
      smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "Y9");
      return;
    }
    if (TMtdIORequestBody == agNULL)
    {
      /* let os process IO */
      TI_DBG1(("ossaSSPEvent: ostiAllocMemory returned NULL TMIORequestBody\n"));
      smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "Y9");
      return;
    }
    /* setup task management structure */
    TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
    /* TD generates Query Task not OS layer */
    TMtdIORequestBody->IOType.InitiatorTMIO.CurrentTaskTag = agNULL;
    TMtdIORequestBody->IOType.InitiatorTMIO.TaskTag = taskTag;
    /* initialize callback function */
    TMtdIORequestBody->IOCompletionFunc = itdssQueryTaskCompleted;
    /* initialize tiDevhandle */
    TMtdIORequestBody->tiDevHandle = tdIORequestBody->tiDevHandle;
    /* initialize agIORequest */
    agTMIORequest = &(TMtdIORequestBody->agIORequest);
    agTMIORequest->osData = (void *) TMtdIORequestBody;
    agTMIORequest->sdkData = agNULL; /* LL takes care of this */
    /* request type */
    agRequestType = AGSA_SSP_TASK_MGNT_REQ;
    TMtdIORequestBody->agRequestType = AGSA_SSP_TASK_MGNT_REQ;
    /* initialize tdIORequestBody_t tdIORequestBody -> agSASRequestBody */
    agSASRequestBody = &(TMtdIORequestBody->transport.SAS.agSASRequestBody);
    agSSPTaskMgntRequest = &(agSASRequestBody->sspTaskMgntReq);
    /* fill up LUN field */
    osti_memset(agSSPTaskMgntRequest->lun, 0, 8);
    /* sets taskMgntFunction field */
    agSSPTaskMgntRequest->taskMgntFunction = AGSA_QUERY_TASK;
    /* debugging */
    if (TMtdIORequestBody->IOCompletionFunc == agNULL)
    {
      TI_DBG1(("ossaSSPEvent: Error !!! IOCompletionFunc is NULL\n"));
    }
    /* send query task management */
    saStatus = saSSPStart(agRoot,
                          agTMIORequest,
                          0,
                          agDevHandle,
                          agRequestType,
                          agSASRequestBody,
                          agIORequest,
                          &ossaSSPCompleted);
    if (saStatus != AGSA_RC_SUCCESS)
    {
      /* free up allocated memory */
      ostiFreeMemory(
                     tiRoot,
                     TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      TI_DBG1(("ossaSSPEvent: saSSPStart failed\n"));
      return;
    }
#endif
  }
#ifdef REMOVED
  else if (event == OSSA_IO_ABORTED)
  {
    TI_DBG2(("ossaSSPEvent: OSSA_IO_ABORTED\n"));
    /* clean up TD layer's IORequestBody */
    tdAbortIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
    ostiFreeMemory(
                   tiRoot,
                   tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
  }
  else if (event == OSSA_IO_NOT_VALID)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_NOT_VALID\n"));
    tdAbortIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
    ostiFreeMemory(
                   tiRoot,
                   tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
  }
#endif
  else if (event == OSSA_IO_XFER_CMD_FRAME_ISSUED)
  {
    TI_DBG2(("ossaSSPEvent: OSSA_IO_XFER_CMD_FRAME_ISSUED\n"));
  }
  else if (event == OSSA_IO_XFER_ERROR_OFFSET_MISMATCH)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_XFER_ERROR_OFFSET_MISMATCH\n"));
  }
  else if (event == OSSA_IO_OVERFLOW)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_OVERFLOW\n"));
    /* ???
       can't call; missing agIOInfoLen
       ostiInitiatorIOCompleted (
                                 tiRoot,
                                 tdIORequestBody->tiIORequest,
                                 tiIOOverRun,
                                 agIOInfoLen,
                                 agNULL,
                                 intContext
                                 );
    */
  }
  else if (event == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED\n"));
  }
  else if (event == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO\n"));
  }
  else if (event == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST\n"));
  }
  else if (event == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE\n"));
  }
  else if (event == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED\n"));
  }
  else if (event == OSSA_IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH\n"));
  }
  else if (event == OSSA_IO_XFER_ERROR_XFER_RDY_OVERRUN)
  {
    TI_DBG1(("ossaSSPEvent: OSSA_IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
  }
  else if (event == OSSA_IO_XFR_ERROR_DIF_MISMATCH || event == OSSA_IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH ||
           event == OSSA_IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH || event == OSSA_IO_XFR_ERROR_DIF_CRC_MISMATCH )
  {
    TI_DBG1(("ossaSSPEvent: DIF related, event 0x%x\n", event));
    /* process DIF detail information */
    TI_DBG2(("ossaSSPEvent: agIOInfoLen %d\n", agIOInfoLen));
    if (agParam == agNULL)
    {
      TI_DBG2(("ossaSSPEvent: agParam is NULL!!!\n"));
      return;
    }
    if (agIOInfoLen < sizeof(agsaDifDetails_t))
    {
      TI_DBG2(("ossaSSPEvent: wrong agIOInfoLen!!! agIOInfoLen %d sizeof(agsaDifDetails_t) %d\n", agIOInfoLen, (int)sizeof(agsaDifDetails_t)));
      return;
    }
    /* reads agsaDifDetails_t */
    saFrameReadBlock(agRoot, agParam, 0, &agDifDetails, sizeof(agsaDifDetails_t));
#ifdef REMOVED
    frameOffset = (agDifDetails.ErrBoffsetEDataLen & 0xFFFF);
#endif
    /* upper 16 bits of ErrBoffsetEDataLen hold the erroring frame's length */
    frameLen = (bit16)((agDifDetails.ErrBoffsetEDataLen & 0xFFFF0000) >> 16);
    TI_DBG2(("ossaSSPEvent: UpperLBA 0x%08x LowerLBA 0x%08x\n", agDifDetails.UpperLBA, agDifDetails.LowerLBA));
    TI_DBG2(("ossaSSPEvent: SASAddrHI 0x%08x SASAddrLO 0x%08x\n",
             TD_GET_SAS_ADDRESSHI(agDifDetails.sasAddressHi), TD_GET_SAS_ADDRESSLO(agDifDetails.sasAddressLo)));
    TI_DBG2(("ossaSSPEvent: DIF error mask 0x%x Device ID 0x%x\n",
             (agDifDetails.DIFErrDevID) & 0xFF, (agDifDetails.DIFErrDevID & 0xFFFF0000) >> 16));
    if (frameLen != 0 && frameLen <= 256)
    {
      /* dump the offending frame that follows the DIF details */
      saFrameReadBlock(agRoot, agParam, sizeof(agsaDifDetails_t), framePayload, frameLen);
      tdhexdump("ossaSSPEvent frame", framePayload, frameLen);
    }
  }
  else
  {
    TI_DBG1(("ossaSSPEvent: other event 0x%x\n", event));
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'd', "Y9");
  return;
}

#ifdef FDS_SM
/*
 * Abort-completion callback for the SATA identify-device path; frees the
 * TD-layer request body that carried the abort (triggered by tdIDStartTimerCB).
 */
osGLOBAL void ossaSATAIDAbortCB(
                agsaRoot_t        *agRoot,
                agsaIORequest_t   *agIORequest,
                bit32             flag,
                bit32             status)
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdIORequestBody_t *tdAbortIORequestBody;

  TI_DBG1(("ossaSATAIDAbortCB: start flag %d status %d\n", flag, status));
  tdAbortIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  /* triggered by tdIDStartTimerCB */
  ostiFreeMemory(
                 tiRoot,
                 tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                 sizeof(tdIORequestBody_t)
                 );
  return;
}
#endif

#ifdef INITIATOR_DRIVER
/*
 * Abort-completion callback (initiator). flag semantics: 0 = abort one IO,
 * 1 = abort all IOs on a device, 2 = abort per port. Reports the result to
 * the OS layer and releases the abort request body.
 */
osGLOBAL void ossaSSPAbortCB(
                agsaRoot_t        *agRoot,
                agsaIORequest_t   *agIORequest,
                bit32             flag,
                bit32             status)
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdIORequestBody_t *tdAbortIORequestBody = agNULL;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tiDeviceHandle_t  *tiDeviceHandle = agNULL;
  tiIORequest_t     *taskTag = agNULL;

  TI_DBG2(("ossaSSPAbortCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Ya");
  tdAbortIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  if (tdAbortIORequestBody == agNULL)
  {
    TI_DBG1(("ossaSSPAbortCB: tdAbortIORequestBody is NULL warning!!!!\n"));
    return;
  }
  if (flag == 2)
  {
    /* abort per port */
    TI_DBG1(("ossaSSPAbortCB: abort per port\n"));
  }
  else if (flag == 1)
  {
    /* abort all IOs on one device */
    TI_DBG2(("ossaSSPAbortCB: abort all\n"));
    tiDeviceHandle = (tiDeviceHandle_t *)tdAbortIORequestBody->tiDevHandle;
    if (tiDeviceHandle == agNULL)
    {
      TI_DBG1(("ossaSSPAbortCB: tiDeviceHandle is NULL warning!!!!\n"));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      return;
    }
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
    if (oneDeviceData == agNULL)
    {
      TI_DBG1(("ossaSSPAbortCB: oneDeviceData is NULL warning!!!!\n"));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      return;
    }
    /* each status: if the OS requested the abort-all, report a local-abort
       event; otherwise deregister the device handle from the LL layer */
    if (status == OSSA_IO_SUCCESS)
    {
      TI_DBG2(("ossaSSPAbortCB: OSSA_IO_SUCCESS\n"));
      /* clean up TD layer's IORequestBody */
      if (oneDeviceData->OSAbortAll == agTRUE)
      {
        oneDeviceData->OSAbortAll = agFALSE;
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            tiDeviceHandle,
                            tiIntrEventTypeLocalAbort,
                            tiAbortOK,
                            agNULL);
      }
      else
      {
        TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_NOT_VALID)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NOT_VALID\n"));
      /* clean up TD layer's IORequestBody */
      if (oneDeviceData->OSAbortAll == agTRUE)
      {
        oneDeviceData->OSAbortAll = agFALSE;
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            tiDeviceHandle,
                            tiIntrEventTypeLocalAbort,
                            tiAbortFailed,
                            agNULL );
      }
      else
      {
        TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_NO_DEVICE)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NO_DEVICE\n"));
      /* clean up TD layer's IORequestBody */
      if (oneDeviceData->OSAbortAll == agTRUE)
      {
        oneDeviceData->OSAbortAll = agFALSE;
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            tiDeviceHandle,
                            tiIntrEventTypeLocalAbort,
                            tiAbortInProgress,
                            agNULL );
      }
      else
      {
        TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_ABORT_IN_PROGRESS)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_IN_PROGRESS\n"));
      /* clean up TD layer's IORequestBody */
      if (oneDeviceData->OSAbortAll == agTRUE)
      {
        oneDeviceData->OSAbortAll = agFALSE;
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            tiDeviceHandle,
                            tiIntrEventTypeLocalAbort,
                            tiAbortInProgress,
                            agNULL );
      }
      else
      {
        TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
#ifdef REMOVED
    else if (status == OSSA_IO_ABORT_DELAYED)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_DELAYED\n"));
      /* clean up TD layer's IORequestBody */
      if (oneDeviceData->OSAbortAll == agTRUE)
      {
        oneDeviceData->OSAbortAll = agFALSE;
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            tiDeviceHandle,
                            tiIntrEventTypeLocalAbort,
                            tiAbortDelayed,
                            agNULL );
      }
      else
      {
        TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
#endif
    else
    {
      TI_DBG1(("ossaSSPAbortCB: other status %d\n", status));
      /* clean up TD layer's IORequestBody */
      if (oneDeviceData->OSAbortAll == agTRUE)
      {
        oneDeviceData->OSAbortAll = agFALSE;
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            tiDeviceHandle,
                            tiIntrEventTypeLocalAbort,
                            tiAbortInProgress,
                            agNULL );
      }
      else
      {
        TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
  }
  else if (flag == 0)
  {
    /* abort of a single IO; taskTag identifies the IO that was aborted */
    TI_DBG2(("ossaSSPAbortCB: abort one\n"));
    taskTag = tdAbortIORequestBody->tiIOToBeAbortedRequest;
    if ( taskTag == agNULL)
    {
      TI_DBG1(("ossaSSPAbortCB: taskTag is NULL; triggered by itdssQueryTaskCompleted\n"));
    }
    if (status == OSSA_IO_SUCCESS)
    {
      TI_DBG2(("ossaSSPAbortCB: OSSA_IO_SUCCESS\n"));
      if (taskTag != agNULL)
      {
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            agNULL,
                            tiIntrEventTypeLocalAbort,
                            tiAbortOK,
                            taskTag );
      }
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_NOT_VALID)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NOT_VALID\n"));
      if (taskTag != agNULL)
      {
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            agNULL,
                            tiIntrEventTypeLocalAbort,
                            tiAbortFailed,
                            taskTag );
      }
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_NO_DEVICE)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NO_DEVICE\n"));
      if (taskTag != agNULL)
      {
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            agNULL,
                            tiIntrEventTypeLocalAbort,
                            tiAbortInProgress,
                            taskTag );
      }
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_ABORT_IN_PROGRESS)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_IN_PROGRESS\n"));
      if (taskTag != agNULL)
      {
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            agNULL,
                            tiIntrEventTypeLocalAbort,
                            tiAbortInProgress,
                            taskTag );
      }
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
#ifdef REMOVED
    else if (status == OSSA_IO_ABORT_DELAYED)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_DELAYED\n"));
      if (taskTag != agNULL)
      {
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            agNULL,
                            tiIntrEventTypeLocalAbort,
                            tiAbortDelayed,
                            taskTag );
      }
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
#endif
    else
    {
      TI_DBG1(("ossaSSPAbortCB: other status %d\n", status));
      if (taskTag != agNULL)
      {
        ostiInitiatorEvent( tiRoot,
                            agNULL,
                            agNULL,
                            tiIntrEventTypeLocalAbort,
                            tiAbortFailed,
                            taskTag );
      }
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
  }
  else
  {
    TI_DBG1(("ossaSSPAbortCB: wrong flag %d\n", flag));
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Ya");
  return;
}
#endif

#ifdef TARGET_DRIVER
/*
 * Abort-completion callback (target). Same flag semantics as the initiator
 * variant (0 = one IO, 1 = all on device, 2 = per port); on device-wide abort
 * the device handle is deregistered and the request body is released.
 */
osGLOBAL void ossaSSPAbortCB(
                agsaRoot_t        *agRoot,
                agsaIORequest_t   *agIORequest,
                bit32             flag,
                bit32             status)
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdIORequestBody_t *tdAbortIORequestBody;
  tdsaDeviceData_t  *oneDeviceData;
  tiDeviceHandle_t  *tiDeviceHandle;

  TI_DBG3(("ossaSSPAbortCB: start\n"));
  tdAbortIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  if (flag == 2)
  {
    /* abort per port */
    TI_DBG2(("ossaSSPAbortCB: abort per port\n"));
  }
  else if (flag == 1)
  {
    TI_DBG2(("ossaSSPAbortCB: abort all\n"));
    tiDeviceHandle = (tiDeviceHandle_t *)tdAbortIORequestBody->tiDevHandle;
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
    if (status == OSSA_IO_SUCCESS)
    {
      TI_DBG2(("ossaSSPAbortCB: OSSA_IO_SUCCESS\n"));
      /* clean up TD layer's IORequestBody */
      TI_DBG3(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
      saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_NOT_VALID)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NOT_VALID\n"));
      /* clean up TD layer's IORequestBody */
      TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
      saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_NO_DEVICE)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NO_DEVICE\n"));
      /* clean up TD layer's IORequestBody */
      TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
      saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
      ostiFreeMemory(
                     tiRoot,
                     tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
    else if (status == OSSA_IO_ABORT_IN_PROGRESS)
    {
      TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_IN_PROGRESS\n"));
      /* clean up TD layer's IORequestBody */
      TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n"));
      saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id));
ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #ifdef REMOVED else if (status == OSSA_IO_ABORT_DELAYED) { TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_DELAYED\n")); /* clean up TD layer's IORequestBody */ TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG2(("ossaSSPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #endif else { TI_DBG1(("ossaSSPAbortCB: other status %d\n", status)); /* clean up TD layer's IORequestBody */ TI_DBG2(("ossaSSPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG1(("ossaSSPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } } else if (flag == 0) { TI_DBG2(("ossaSSPAbortCB: abort one\n")); if (status == OSSA_IO_SUCCESS) { TI_DBG2(("ossaSSPAbortCB: OSSA_IO_SUCCESS\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_NOT_VALID) { TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NOT_VALID\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_NO_DEVICE) { TI_DBG1(("ossaSSPAbortCB: OSSA_IO_NO_DEVICE\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_ABORT_IN_PROGRESS) { TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_IN_PROGRESS\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #ifdef REMOVED else if (status == OSSA_IO_ABORT_DELAYED) 
{ TI_DBG1(("ossaSSPAbortCB: OSSA_IO_ABORT_DELAYED\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #endif else { TI_DBG1(("ossaSSPAbortCB: other status %d\n", status)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } } else { TI_DBG1(("ossaSSPAbortCB: wrong flag %d\n", flag)); } return; } #endif /*****************************************************************************/ /*! \brief ossaLocalPhyControlCB * * * Purpose: This routine is called by lower layer to indicate the status of * phy operations * * \param agRoot: Pointer to chip/driver Instance. * \param phyId Phy id * \param phyOperation Operation to be done on the phy * \param status Phy operation specific completion status * \param parm Additional parameter, phy operation and status specific * * * \return None. * */ /*****************************************************************************/ osGLOBAL void ossaLocalPhyControlCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 phyId, bit32 phyOperation, bit32 status, void *parm ) { #ifdef REMVOED agsaPhyErrCounters_t *agPhyErrCounters; #endif #ifdef INITIATOR_DRIVER tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tiIORequest_t *currentTaskTag; tdsaDeviceData_t *TargetDeviceData; satDeviceData_t *pSatDevData; agsaDevHandle_t *agDevHandle = agNULL; agsaContext_t *agContextDevice; #endif smTraceFuncEnter(hpDBG_VERY_LOUD,"Yb"); TI_DBG3(("ossaLocalPhyControlCB: start phyID %d\n", phyId)); TI_DBG3(("ossaLocalPhyControlCB: phyOperation %d status 0x%x\n", phyOperation, status)); switch (phyOperation) { case AGSA_PHY_LINK_RESET: /* fall through */ case AGSA_PHY_HARD_RESET: if (phyOperation == AGSA_PHY_LINK_RESET) { TI_DBG1(("ossaLocalPhyControlCB: AGSA_PHY_LINK_RESET, status 0x%x\n", status)); } else { TI_DBG1(("ossaLocalPhyControlCB: AGSA_PHY_HARD_RESET, status 0x%x\n", 
status)); } #ifdef INITIATOR_DRIVER if (agContext != agNULL) { currentTaskTag = (tiIORequest_t *)agContext->osData; if (status == OSSA_SUCCESS) { if (currentTaskTag != agNULL) { TI_DBG2(("ossaLocalPhyControlCB: callback to OS layer with success\n")); TargetDeviceData = (tdsaDeviceData_t *)currentTaskTag->tdData; pSatDevData = (satDeviceData_t *)&(TargetDeviceData->satDevData); agDevHandle = TargetDeviceData->agDevHandle; TI_DBG2(("ossaLocalPhyControlCB: satPendingIO %d satNCQMaxIO %d\n", pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO )); TI_DBG2(("ossaLocalPhyControlCB: satPendingNCQIO %d satPendingNONNCQIO %d\n", pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO)); pSatDevData->satDriveState = SAT_DEV_STATE_NORMAL; if (TargetDeviceData->TRflag == agTRUE) { saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, TargetDeviceData), agDevHandle, SA_DS_OPERATIONAL); TargetDeviceData->TRflag = agFALSE; ostiInitiatorEvent(tiRoot, TargetDeviceData->tdPortContext->tiPortalContext, &(TargetDeviceData->tiDeviceHandle), tiIntrEventTypeTransportRecovery, tiRecOK, agNULL ); } else { agDevHandle = TargetDeviceData->agDevHandle; if (agDevHandle == agNULL) { TI_DBG1(("ossaLocalPhyControlCB: wrong, agDevHandle is NULL\n")); } /* move this to OSSA_HW_EVENT_PORT_RESET_COMPLETE in ossaHwCB() */ agContextDevice = &(TargetDeviceData->agDeviceResetContext); agContextDevice->osData = currentTaskTag; #ifdef REMOVED ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMOK, currentTaskTag ); #endif } } } else { if (currentTaskTag != agNULL) { TI_DBG1(("ossaLocalPhyControlCB: callback to OS layer with failure\n")); TargetDeviceData = (tdsaDeviceData_t *)currentTaskTag->tdData; pSatDevData = (satDeviceData_t *)&(TargetDeviceData->satDevData); TI_DBG1(("ossaLocalPhyControlCB: satPendingIO %d satNCQMaxIO %d\n", pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO )); TI_DBG1(("ossaLocalPhyControlCB: satPendingNCQIO %d satPendingNONNCQIO %d\n", 
pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO)); if (TargetDeviceData->TRflag == agTRUE) { TargetDeviceData->TRflag = agFALSE; ostiInitiatorEvent(tiRoot, TargetDeviceData->tdPortContext->tiPortalContext, &(TargetDeviceData->tiDeviceHandle), tiIntrEventTypeTransportRecovery, tiRecFailed , agNULL ); } else { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, currentTaskTag ); } } } } #endif break; #ifdef REMOVED case AGSA_PHY_GET_ERROR_COUNTS: TI_DBG2(("ossaLocalPhyControlCB: AGSA_PHY_GET_ERROR_COUNTS, status 0x%x\n", status)); if(parm !=agNULL ) { agPhyErrCounters = (agsaPhyErrCounters_t *)parm; TI_DBG2(("ossaLocalPhyControlCB: invalidDword %d\n", agPhyErrCounters->invalidDword)); TI_DBG2(("ossaLocalPhyControlCB: runningDisparityError %d\n", agPhyErrCounters->runningDisparityError)); TI_DBG2(("ossaLocalPhyControlCB: lostOfDwordSynch %d\n", agPhyErrCounters->lossOfDwordSynch)); TI_DBG2(("ossaLocalPhyControlCB: phyResetProblem %d\n", agPhyErrCounters->phyResetProblem)); TI_DBG2(("ossaLocalPhyControlCB: elasticityBufferOverflow %d\n", agPhyErrCounters->elasticityBufferOverflow)); TI_DBG2(("ossaLocalPhyControlCB: receivedErrorPrimitive %d\n", agPhyErrCounters->receivedErrorPrimitive)); } break; case AGSA_PHY_CLEAR_ERROR_COUNTS: TI_DBG2(("ossaLocalPhyControlCB: AGSA_PHY_CLEAR_ERROR_COUNTS, status 0x%x\n", status)); break; #endif case AGSA_PHY_NOTIFY_ENABLE_SPINUP: TI_DBG2(("ossaLocalPhyControlCB: AGSA_PHY_NOTIFY_ENABLE_SPINUP, status 0x%x\n", status)); break; case AGSA_PHY_BROADCAST_ASYNCH_EVENT: TI_DBG2(("ossaLocalPhyControlCB: AGSA_PHY_BROADCAST_ASYNCH_EVENT, status 0x%x\n", status)); if (tIsSPC12SATA(agRoot)) { TI_DBG1(("ossaLocalPhyControlCB: BROADCAST_ASYNCH_EVENT received for SATA Controller\n")); break; } break; case AGSA_PHY_COMINIT_OOB : TI_DBG2(("ossaLocalPhyControlCB: AGSA_PHY_COMINIT_OOB, status 0x%x\n", status)); break; default: TI_DBG1(("ossaLocalPhyControlCB: UNKNOWN default case. 
phyOperation %d status 0x%x\n", phyOperation, status)); break; } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yb"); return; } GLOBAL void ossaGetPhyProfileCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 ppc, bit32 phyID, void *parm ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; #ifdef CCFLAGS_PHYCONTROL_COUNTS agsaPhyAnalogSettingsPage_t *analog; #endif /* CCFLAGS_PHYCONTROL_COUNTS */ tdPhyCount_t *PhyBlob = agNULL; agsaPhyBWCountersPage_t *agBWCounters; agsaPhyErrCountersPage_t *agPhyErrCounters; TI_DBG1(("ossaGetPhyProfileCB: agContext %p parm %p\n", agContext, parm)); /* if( tdsaAllShared->tdFWControlEx.inProgress ) { tdsaAllShared->tdFWControlEx.inProgress = 0; PhyBlob = (tdPhyCount_t *)tdsaAllShared->tdFWControlEx.usrAddr; } */ switch(ppc) { case AGSA_SAS_PHY_BW_COUNTERS_PAGE: TI_DBG1(("ossaGetPhyProfileCB: AGSA_SAS_PHY_BW_COUNTERS_PAGE, status 0x%x phyID %d\n", status, phyID)); if(parm !=agNULL ) { agBWCounters = (agsaPhyBWCountersPage_t *)parm; TI_DBG1(("ossaGetPhyProfileCB: RX %d TX %d\n", agBWCounters->RXBWCounter,agBWCounters->TXBWCounter)); if(PhyBlob !=agNULL ) { PhyBlob->InvalidDword = 0; PhyBlob->runningDisparityError = 0; PhyBlob->codeViolation = 0; PhyBlob->phyResetProblem = 0; PhyBlob->inboundCRCError = 0; PhyBlob->BW_rx = agBWCounters->RXBWCounter; PhyBlob->BW_tx = agBWCounters->TXBWCounter; } } break; case AGSA_SAS_PHY_ERR_COUNTERS_PAGE: if( tdsaAllShared->tdFWControlEx.inProgress ) { tdsaAllShared->tdFWControlEx.inProgress = 0; PhyBlob = (tdPhyCount_t *)tdsaAllShared->tdFWControlEx.usrAddr; } TI_DBG1(("ossaGetPhyProfileCB: AGSA_SAS_PHY_ERR_COUNTERS_PAGE, status 0x%x phyID %d\n", status, phyID)); if(parm !=agNULL ) { agPhyErrCounters = (agsaPhyErrCountersPage_t *)parm; if(PhyBlob !=agNULL ) { PhyBlob->InvalidDword = 
agPhyErrCounters->invalidDword; PhyBlob->runningDisparityError = agPhyErrCounters->runningDisparityError; PhyBlob->LossOfSyncDW = agPhyErrCounters->lossOfDwordSynch; PhyBlob->codeViolation = agPhyErrCounters->codeViolation; PhyBlob->phyResetProblem = agPhyErrCounters->phyResetProblem; PhyBlob->inboundCRCError = agPhyErrCounters->inboundCRCError; PhyBlob->BW_rx = 0; PhyBlob->BW_tx = 0; TI_DBG2(("ossaGetPhyProfileCB: invalidDword %d\n", agPhyErrCounters->invalidDword)); TI_DBG2(("ossaGetPhyProfileCB: runningDisparityError %d\n", agPhyErrCounters->runningDisparityError)); TI_DBG2(("ossaGetPhyProfileCB: lostOfDwordSynch %d\n", agPhyErrCounters->lossOfDwordSynch)); TI_DBG2(("ossaGetPhyProfileCB: phyResetProblem %d\n", agPhyErrCounters->phyResetProblem)); TI_DBG2(("ossaGetPhyProfileCB: inboundCRCError %d\n", agPhyErrCounters->inboundCRCError)); } } break; case AGSA_SAS_PHY_ERR_COUNTERS_CLR_PAGE: TI_DBG1(("ossaGetPhyProfileCB: AGSA_SAS_PHY_ERR_COUNTERS_CLR_PAGE status 0x%x phyID %d\n", status, phyID)); break; case AGSA_SAS_PHY_ANALOG_SETTINGS_PAGE: TI_DBG1(("ossaGetPhyProfileCB:AGSA_SAS_PHY_ANALOG_SETTINGS_PAGE status 0x%x phyID %d\n", status, phyID)); #ifdef CCFLAGS_PHYCONTROL_COUNTS if(parm !=agNULL ) { analog = (agsaPhyAnalogSettingsPage_t *)parm; TI_DBG1(("ossaGetPhyProfileCB: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", analog->Dword0, analog->Dword1, analog->Dword2, analog->Dword3, analog->Dword4, analog->Dword5, analog->Dword6, analog->Dword7, analog->Dword8, analog->Dword9)); tdsaAllShared->analog[phyID].spaRegister0 = analog->Dword0; tdsaAllShared->analog[phyID].spaRegister1 = analog->Dword1; tdsaAllShared->analog[phyID].spaRegister2 = analog->Dword2; tdsaAllShared->analog[phyID].spaRegister3 = analog->Dword3; tdsaAllShared->analog[phyID].spaRegister4 = analog->Dword4; saSetPhyProfile( agRoot,agContext,tdsaRotateQnumber(tiRoot, agNULL), 
AGSA_SAS_PHY_ANALOG_SETTINGS_PAGE,sizeof(agsaPhyAnalogSetupRegisters_t),&tdsaAllShared->analog[phyID],phyID);
      }
#endif /* CCFLAGS_PHYCONTROL_COUNTS */
      break;
    case AGSA_SAS_PHY_OPEN_REJECT_RETRY_BACKOFF_THRESHOLD_PAGE:
    {
      TI_DBG1(("ossaGetPhyProfileCB:AGSA_SAS_PHY_OPEN_REJECT_RETRY_BACKOFF_THRESHOLD_PAGE status 0x%x phyID %d\n", status, phyID));
      if( parm !=agNULL )
      {
#ifdef TD_DEBUG_ENABLE
        agsaSASPhyOpenRejectRetryBackOffThresholdPage_t *Backoff =
          (agsaSASPhyOpenRejectRetryBackOffThresholdPage_t *)parm;
#endif
        /* NOTE(review): 'Backoff' is only declared under TD_DEBUG_ENABLE yet is
           referenced below; this compiles only because TI_DBG2() discards its
           arguments when debug output is disabled - confirm before changing
           the debug macro definitions */
        TI_DBG2(("ossaGetPhyProfileCB: DW0 0x%X DW1 0x%X DW2 0x%X DW3 0x%X\n",
                 Backoff->Dword0,Backoff->Dword1,
                 Backoff->Dword2,Backoff->Dword3));
      }
      break;
    }
    case AGSA_SAS_PHY_GENERAL_STATUS_PAGE:
    {
      agsaSASPhyGeneralStatusPage_t * GenStatus = NULL;
      TI_DBG1(("ossaGetPhyProfileCB: AGSA_SAS_PHY_GENERAL_STATUS_PAGE status 0x%x phyID %d\n", status, phyID));
      if( parm !=agNULL )
      {
        GenStatus= (agsaSASPhyGeneralStatusPage_t *)parm;
        TI_DBG2(("ossaGetPhyProfileCB: "
                 "AGSA_SAS_PHY_GENERAL_STATUS_PAGE status %d DW0 0x%x DW1 0x%x\n",
                 status, GenStatus->Dword0, GenStatus->Dword1));
      }
      /* this page is answered through its own response path and must NOT fall
         through to ostiGetPhyProfileIOCTLRsp() at the bottom of the switch,
         hence the early return instead of break */
      ostiGetPhyGeneralStatusRsp(tiRoot, GenStatus, phyID);
      // break;
      return ;
    }
    default:
      TI_DBG1(("ossaGetPhyProfileCB: UNKNOWN default case. phyOperation %d status 0x%x\n", ppc, status));
      break;
  }
  /* common completion path for all pages except GENERAL_STATUS (see above) */
  ostiGetPhyProfileIOCTLRsp(tiRoot, status);
}

/* Completion callback for saSetPhyProfile(); logging only - no state is
   updated here. */
GLOBAL void ossaSetPhyProfileCB(
                      agsaRoot_t    *agRoot,
                      agsaContext_t *agContext,
                      bit32         status,
                      bit32         ppc,
                      bit32         phyID,
                      void          *parm )
{
  TI_DBG1(("ossaSetPhyProfileCB:agContext %p status 0x%x ppc %d phyID %d parm %p\n",agContext, status, ppc, phyID,parm));
}

/*****************************************************************************/
/*! \brief ossaGetDeviceHandlesCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saGetDeviceHandles()
 *
 *  \param agRoot:     Pointer to chip/driver Instance.
 *  \param agContext:  Context of the get device handle request originally passed into
 *                     saGetDeviceHandles().
 * \param agPortContext:Pointer to this instance of a port context
 * \param agDev:        Array containing pointers to the device handles
 * \param validDevs     Number of valid device handles
 *
 *
 * \return None.
 *
 * \note - The scope is shared target and initiator.
 *         For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
osGLOBAL void ossaGetDeviceHandlesCB(
                                     agsaRoot_t        *agRoot,
                                     agsaContext_t     *agContext,
                                     agsaPortContext_t *agPortContext,
                                     agsaDevHandle_t   *agDev[],
                                     bit32             validDevs
                                     )
{
  /* Currently a logging-only callback: it reports how many valid device
     handles saGetDeviceHandles() returned but does not consume the handle
     array (see the disabled TO_DO block below). */
  TI_DBG2(("ossaGetDeviceHandlesCB: start\n"));
  TI_DBG2(("ossaGetDeviceHandlesCB: validDevs %d\n", validDevs));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yc");
#ifdef TO_DO
  /* NOTE(review): dead placeholder - 'i' is never declared and the loop body
     has no effect; needs real handling before TO_DO is ever defined */
  for (i = 0 ; i < validDevs ; i++)
  {
    agDev[i];
  }
#endif
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yc");
  return;
}

/*****************************************************************************/
/*! \brief ossaGetDeviceInfoCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saGetDeviceInfo()
 *
 *  \param agRoot:      Pointer to chip/driver Instance.
 *  \param agDevHandle: Handle of the device
 *  \param status:      status
 *  \param agInfo:      Pointer to the structure that describes device information
 *
 *
 *  \return None.
 *
 *  \note - The scope is shared target and initiator.
 *          For details, refer to SAS/SATA Low-Level API Specification
 */
/*****************************************************************************/
osGLOBAL void ossaGetDeviceInfoCB(
                                  agsaRoot_t      *agRoot,
                                  agsaContext_t   *agContext,
                                  agsaDevHandle_t *agDevHandle,
                                  bit32            status,
                                  void            *agInfo
                                  )
{
  /* Logging-only completion of saGetDeviceInfo(): decodes 'agInfo' according
     to 'status' and prints it; the ostiGetDeviceInfoIOCTLRsp()/ostiPortEvent()
     notifications are currently commented out, so no state changes here. */
#ifdef TD_DEBUG_ENABLE
  /* NOTE(review): these locals exist only under TD_DEBUG_ENABLE; the TI_DBG*
     uses below rely on the debug macros discarding their arguments when
     disabled - confirm before changing the debug macro definitions */
  agsaDeviceInfo_t         *agDeviceInfo;
  agsaSASDeviceInfo_t      *agSASDeviceInfo;
  agsaSATADeviceInfo_t     *agSATADeviceInfo;
#endif
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yd");
  TI_DBG1(("ossaGetDeviceInfoCB: start agContext %p\n",agContext));

  switch (status)
  {
  case OSSA_DEV_INFO_INVALID_HANDLE:
    TI_DBG1(("ossaGetDeviceInfoCB: OSSA_DEV_INFO_INVALID_HANDLE\n"));
    /*ostiGetDeviceInfoIOCTLRsp(tiRoot, status, agNULL);*/
    break;
  case OSSA_DEV_INFO_NO_EXTENDED_INFO:
#ifdef TD_DEBUG_ENABLE
    agDeviceInfo = (agsaDeviceInfo_t *)agInfo;
#endif
    TI_DBG1(("ossaGetDeviceInfoCB: OSSA_DEV_INFO_NO_EXTENDED_INFO\n"));
    TI_DBG1(("ossaGetDeviceInfoCB: sasAddressHi 0x%08x\n", SA_DEVINFO_GET_SAS_ADDRESSHI(agDeviceInfo)));
    TI_DBG1(("ossaGetDeviceInfoCB: sasAddressLo 0x%08x\n", SA_DEVINFO_GET_SAS_ADDRESSLO(agDeviceInfo)));
    TI_DBG1(("ossaGetDeviceInfoCB: devType_S_Rate 0x%08x\n", agDeviceInfo->devType_S_Rate));
    TI_DBG1(("ossaGetDeviceInfoCB: firstBurstSize 0x%08x\n", agDeviceInfo->firstBurstSize));
    /*ostiPortEvent (tiRoot, tiGetDevInfo, tiSuccess,(void *)agContext );*/
    /*ostiGetDeviceInfoIOCTLRsp(tiRoot, status, agDeviceInfo);*/
    break;
  case OSSA_DEV_INFO_SAS_EXTENDED_INFO:
#ifdef TD_DEBUG_ENABLE
    agSASDeviceInfo = (agsaSASDeviceInfo_t *)agInfo;
#endif
    TI_DBG2(("ossaGetDeviceInfoCB: OSSA_DEV_INFO_SAS_EXTENDED_INFO\n"));
    TI_DBG2(("ossaGetDeviceInfoCB: sasAddressHi 0x%08x\n", SA_DEVINFO_GET_SAS_ADDRESSHI(&agSASDeviceInfo->commonDevInfo)));
    TI_DBG2(("ossaGetDeviceInfoCB: sasAddressLo 0x%08x\n", SA_DEVINFO_GET_SAS_ADDRESSLO(&agSASDeviceInfo->commonDevInfo)));
    TI_DBG2(("ossaGetDeviceInfoCB: initiator_ssp_stp_smp %d\n", agSASDeviceInfo->initiator_ssp_stp_smp));
    TI_DBG2(("ossaGetDeviceInfoCB: target_ssp_stp_smp %d\n",
agSASDeviceInfo->target_ssp_stp_smp)); TI_DBG2(("ossaGetDeviceInfoCB: numOfPhys %d\n", agSASDeviceInfo->numOfPhys)); TI_DBG2(("ossaGetDeviceInfoCB: phyIdentifier %d\n", agSASDeviceInfo->phyIdentifier)); break; case OSSA_DEV_INFO_SATA_EXTENDED_INFO: #ifdef TD_DEBUG_ENABLE agSATADeviceInfo = (agsaSATADeviceInfo_t *)agInfo; #endif TI_DBG2(("ossaGetDeviceInfoCB: OSSA_DEV_INFO_SATA_EXTENDED_INFO\n")); TI_DBG2(("ossaGetDeviceInfoCB: sasAddressHi 0x%08x\n", SA_DEVINFO_GET_SAS_ADDRESSHI(&agSATADeviceInfo->commonDevInfo))); TI_DBG2(("ossaGetDeviceInfoCB: sasAddressLo 0x%08x\n", SA_DEVINFO_GET_SAS_ADDRESSLO(&agSATADeviceInfo->commonDevInfo))); TI_DBG2(("ossaGetDeviceInfoCB: connection %d\n", agSATADeviceInfo->connection)); TI_DBG2(("ossaGetDeviceInfoCB: portMultiplierField %d\n", agSATADeviceInfo->portMultiplierField)); TI_DBG2(("ossaGetDeviceInfoCB: stpPhyIdentifier %d\n", agSATADeviceInfo->stpPhyIdentifier)); #ifdef TD_DEBUG_ENABLE tdhexdump("ossaGetDeviceInfoCB: signature", (bit8 *)agSATADeviceInfo->signature, 8); #endif break; default: TI_DBG2(("ossaGetDeviceInfoCB: error default case, status is %d\n", status)); break; } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yd"); return; } /*****************************************************************************/ /*! \brief ossaDeviceRegistrationCB * * * Purpose: This routine is called by lower layer to corresponding to * saRegisterNewDevice() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the get device handle request originally * passed into saRegisterNewDevice(). * \param status: status * \param agDevHandle: Pointer to the assigned device handle for the * registered device. * * * \return None. 
* */ /*****************************************************************************/ osGLOBAL void ossaDeviceRegistrationCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, agsaDevHandle_t *agDevHandle, bit32 deviceID ) { #ifdef INITIATOR_DRIVER tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; bit32 Indenom = tdsaAllShared->QueueConfig.numInboundQueues; bit32 Outdenom = tdsaAllShared->QueueConfig.numOutboundQueues; tdsaDeviceData_t *oneDeviceData = (tdsaDeviceData_t *)agContext->osData; tdsaPortContext_t *onePortContext = oneDeviceData->tdPortContext; tiPortalContext_t *tiPortalContext = onePortContext->tiPortalContext; #ifdef FDS_DM dmRoot_t *dmRoot = &(tdsaAllShared->dmRoot); dmPortContext_t *dmPortContext = &(onePortContext->dmPortContext); dmDeviceInfo_t dmDeviceInfo; bit32 DMstatus = DM_RC_FAILURE; bit16 ext = 0; bit32 expanderType = 1; #endif #if defined(FDS_DM) && !defined(FDS_SM) bit32 IDstatus; #endif #ifdef FDS_SM smRoot_t *smRoot = &(tdsaAllShared->smRoot); bit32 SMstatus = SM_RC_FAILURE; #endif smTraceFuncEnter(hpDBG_VERY_LOUD,"Ye"); TI_DBG3(("ossaDeviceRegistrationCB: start status 0x%x\n",status)); TI_DBG3(("ossaDeviceRegistrationCB: device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("ossaDeviceRegistrationCB: device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG3(("ossaDeviceRegistrationCB: did 0x%x\n", oneDeviceData->id)); TI_DBG3(("ossaDeviceRegistrationCB: deviceID 0x%x\n", deviceID)); TI_DBG3(("ossaDeviceRegistrationCB: agDevHandle %p %p %p\n",agDevHandle,agDevHandle->osData,agDevHandle->sdkData )); /* transient period caused by tdssReportRemovals(), device was in the middle of registration but port is invalidated */ if (oneDeviceData->valid == agFALSE && oneDeviceData->valid2 == agFALSE && 
oneDeviceData->DeviceType == TD_DEFAULT_DEVICE) { if (status == OSSA_SUCCESS) { TI_DBG2(("ossaDeviceRegistrationCB: transient, calling saDeregisterDeviceHandle, did %d\n", oneDeviceData->id)); oneDeviceData->agDevHandle = agDevHandle; agDevHandle->osData = oneDeviceData; if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) { if (oneDeviceData->satDevData.IDDeviceValid == agFALSE) { saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, 0); } else { saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); } } else { saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); } } else if (status == OSSA_FAILURE_PORT_NOT_VALID_STATE || status == OSSA_ERR_PORT_STATE_NOT_VALID) { /* do nothing */ TI_DBG2(("ossaDeviceRegistrationCB: transient, do nothing did %d\n", oneDeviceData->id)); } return; } if (agDevHandle == agNULL) { TI_DBG3(("ossaDeviceRegistrationCB: agDevHandle is NULL\n")); } else { TI_DBG3(("ossaDeviceRegistrationCB: agDevHandle is NOT NULL\n")); } switch (status) { case OSSA_SUCCESS: TI_DBG3(("ossaDeviceRegistrationCB: success\n")); TI_DBG2(("ossaDeviceRegistrationCB: Success did %d FW did 0x%x\n", oneDeviceData->id, deviceID)); TI_DBG2(("ossaDeviceRegistrationCB: Success pid %d\n", onePortContext->id)); if (agDevHandle == agNULL) { TI_DBG1(("ossaDeviceRegistrationCB: agDevHandle is NULL, wrong!\n")); return; } oneDeviceData->agDevHandle = agDevHandle; agDevHandle->osData = oneDeviceData; oneDeviceData->registered = agTRUE; oneDeviceData->InQID = oneDeviceData->id % Indenom; oneDeviceData->OutQID = oneDeviceData->id % Outdenom; onePortContext->RegisteredDevNums++; TI_DBG3(("ossaDeviceRegistrationCB: direct %d STP target %d target_ssp_stp_smp %d\n", oneDeviceData->directlyAttached, DEVICE_IS_STP_TARGET(oneDeviceData), oneDeviceData->target_ssp_stp_smp)); TI_DBG3(("ossaDeviceRegistrationCB: pid %d 
registeredNumDevice %d\n", onePortContext->id, onePortContext->RegisteredDevNums)); TI_DBG3(("ossaDeviceRegistrationCB: pid %d Count %d\n", onePortContext->id, onePortContext->Count)); #ifdef FDS_DM /* if device is an expander, register it to DM */ if (onePortContext->valid == agTRUE) { if (DEVICE_IS_SMP_TARGET(oneDeviceData)) { TI_DBG1(("ossaDeviceRegistrationCB: calling dmRegisterDevice\n")); TI_DBG1(("ossaDeviceRegistrationCB: device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG1(("ossaDeviceRegistrationCB: device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); /* set up dmDeviceInfo */ osti_memset(&dmDeviceInfo, 0, sizeof(dmDeviceInfo_t)); DEVINFO_PUT_SAS_ADDRESSLO(&dmDeviceInfo, oneDeviceData->SASAddressID.sasAddressLo); DEVINFO_PUT_SAS_ADDRESSHI(&dmDeviceInfo, oneDeviceData->SASAddressID.sasAddressHi); dmDeviceInfo.initiator_ssp_stp_smp = oneDeviceData->initiator_ssp_stp_smp; dmDeviceInfo.target_ssp_stp_smp = oneDeviceData->target_ssp_stp_smp; dmDeviceInfo.devType_S_Rate = oneDeviceData->agDeviceInfo.devType_S_Rate; if (oneDeviceData->directlyAttached == agTRUE) { /* setting SMP bit */ ext = (bit16)(ext | 0x100); expanderType = SA_IDFRM_GET_DEVICETTYPE(&onePortContext->sasIDframe); ext = (bit16)( ext | (expanderType << 9)); /* setting MCN field to 0xF */ ext = (bit16)(ext | (bit16)(0xF << 11)); TI_DBG1(("ossaDeviceRegistrationCB: directlyAttached ext 0x%x\n", ext)); dmDeviceInfo.ext = ext; } DMstatus = dmRegisterDevice(dmRoot, dmPortContext, &dmDeviceInfo, oneDeviceData->agDevHandle); if (DMstatus != DM_RC_SUCCESS) { TI_DBG1(("ossaDeviceRegistrationCB: dmRegisterDevice failed!!! 
0x%x\n", DMstatus)); } } } #endif /* FDS_DM */ #ifdef FDS_SM /* if device is SATA, register it to SM */ if (onePortContext->valid == agTRUE) { if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) { TI_DBG1(("ossaDeviceRegistrationCB: calling smRegisterDevice\n")); if (oneDeviceData->directlyAttached == agTRUE) { SMstatus = smRegisterDevice(smRoot, agDevHandle, &(oneDeviceData->smDeviceHandle), agNULL, (bit32)oneDeviceData->phyID, oneDeviceData->satDevData.satDeviceType); } else { if (oneDeviceData->ExpDevice == agNULL) { TI_DBG1(("ossaDeviceRegistrationCB: oneDeviceData->ExpDevice NULL!!!\n")); return; } if (oneDeviceData->ExpDevice->agDevHandle == agNULL) { TI_DBG1(("ossaDeviceRegistrationCB: oneDeviceData->ExpDevice->agDevHandle NULL!!!\n")); } SMstatus = smRegisterDevice(smRoot, agDevHandle, &(oneDeviceData->smDeviceHandle), oneDeviceData->ExpDevice->agDevHandle, (bit32)oneDeviceData->phyID, oneDeviceData->satDevData.satDeviceType); } if (SMstatus != SM_RC_SUCCESS) { TI_DBG1(("ossaDeviceRegistrationCB: smRegisterDevice failed!!! 
0x%x\n", DMstatus)); } } } #endif /* FDS_SM */ /* special case for directly attached targets */ if (oneDeviceData->directlyAttached == agTRUE) { TI_DBG3(("ossaDeviceRegistrationCB: directly attached did %d\n", oneDeviceData->id)); if (oneDeviceData->DeviceType == TD_SAS_DEVICE) { TI_DBG3(("ossaDeviceRegistrationCB: SAS target\n")); if (onePortContext->valid == agTRUE) { if (onePortContext->PortRecoverPhyID != 0xFF) { oneDeviceData->phyID = (bit8)onePortContext->PortRecoverPhyID; onePortContext->PortRecoverPhyID = 0xFF; TI_DBG3(("ossaDeviceRegistrationCB: PortRecoverPhyID %d\n", oneDeviceData->phyID)); } /* link up and discovery ready event */ if (onePortContext->DiscoveryRdyGiven == agFALSE) { TI_DBG2(("ossaDeviceRegistrationCB: link up and discovery ready\n")); TI_DBG3(("ossaDeviceRegistrationCB: phyID %d pid %d\n", oneDeviceData->phyID, onePortContext->id)); TI_DBG3(("ossaDeviceRegistrationCB: tiPortalContext %p\n", tdsaAllShared->Ports[oneDeviceData->phyID].tiPortalContext)); TI_DBG3(("ossaDeviceRegistrationCB: onePortContext->tiPortalContext %p\n", onePortContext->tiPortalContext)); onePortContext->DiscoveryRdyGiven = agTRUE; if (onePortContext->DiscoveryState != ITD_DSTATE_NOT_STARTED) { TI_DBG1(("ossaDeviceRegistrationCB: wrong discovery state 0x%x\n", onePortContext->DiscoveryState)); } /* notifying link up */ ostiPortEvent ( tiRoot, tiPortLinkUp, tiSuccess, (void *)onePortContext->tiPortalContext ); #ifdef INITIATOR_DRIVER /* triggers discovery */ ostiPortEvent( tiRoot, tiPortDiscoveryReady, tiSuccess, (void *)onePortContext->tiPortalContext ); #endif } } else { TI_DBG2(("ossaDeviceRegistrationCB: abort call\n")); /* abort all followed by deregistration of sas target */ tdsaAbortAll(tiRoot, agRoot, oneDeviceData); } } else { TI_DBG2(("ossaDeviceRegistrationCB: SATA target\n")); if (onePortContext->valid == agTRUE) { if (oneDeviceData->satDevData.IDDeviceValid == agFALSE) { #ifdef FDS_SM /* send identify device data */ tdIDStart(tiRoot, agRoot, smRoot, 
oneDeviceData, onePortContext); #else /* send identify device data */ tdssSubAddSATAToSharedcontext(tiRoot, oneDeviceData); #endif } } else { TI_DBG2(("ossaDeviceRegistrationCB: abort call\n")); /* abort all followed by deregistration of sas target */ tdsaAbortAll(tiRoot, agRoot, oneDeviceData); } } } else /* behind the expander */ { #if defined(FDS_DM) && defined(FDS_SM) /* send ID to SATA targets needs go allocate tdIORequestBody_t for smIORequest */ if ( (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) && oneDeviceData->satDevData.IDDeviceValid == agFALSE) { tdIDStart(tiRoot, agRoot, smRoot, oneDeviceData, onePortContext); } #elif defined(FDS_DM) /* worked with DM */ if ( (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) && oneDeviceData->satDevData.IDDeviceValid == agFALSE) { IDstatus = tdsaDiscoveryStartIDDev(tiRoot, agNULL, &(oneDeviceData->tiDeviceHandle), agNULL, oneDeviceData); if (IDstatus != tiSuccess) { /* identify device data is not valid */ TI_DBG1(("ossaDeviceRegistrationCB: fail or busy %d\n", IDstatus)); oneDeviceData->satDevData.IDDeviceValid = agFALSE; } } #endif } /* after discovery is finished */ if (onePortContext->DiscoveryState == ITD_DSTATE_COMPLETED) { TI_DBG2(("ossaDeviceRegistrationCB: calling new device arrival\n")); if (DEVICE_IS_SSP_TARGET(oneDeviceData)) { /* in case registration is finished after discovery is finished */ #ifdef AGTIAPI_CTL if (tdsaAllShared->SASConnectTimeLimit) tdsaCTLSet(tiRoot, onePortContext, tiIntrEventTypeDeviceChange, tiDeviceArrival); else #endif ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDeviceChange, tiDeviceArrival, agNULL ); } else if ( (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) && oneDeviceData->satDevData.IDDeviceValid == agTRUE ) { /* in case registration is finished after discovery is finished */ ostiInitiatorEvent( tiRoot, tiPortalContext, agNULL, tiIntrEventTypeDeviceChange, 
tiDeviceArrival, agNULL ); } } break; case OSSA_FAILURE_OUT_OF_RESOURCE: /* fall through */ case OSSA_ERR_DEVICE_HANDLE_UNAVAILABLE: TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_OUT_OF_RESOURCE or OSSA_ERR_DEVICE_HANDLE_UNAVAILABLE\n")); oneDeviceData->registered = agFALSE; break; case OSSA_FAILURE_DEVICE_ALREADY_REGISTERED: /* fall through */ case OSSA_ERR_DEVICE_ALREADY_REGISTERED: /* do nothing */ TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_DEVICE_ALREADY_REGISTERED or OSSA_ERR_DEVICE_ALREADY_REGISTERED\n")); break; case OSSA_FAILURE_INVALID_PHY_ID: /* fall through */ case OSSA_ERR_PHY_ID_INVALID: TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_INVALID_PHY_ID or OSSA_ERR_PHY_ID_INVALID\n")); oneDeviceData->registered = agFALSE; break; case OSSA_FAILURE_PHY_ID_ALREADY_REGISTERED: /* fall through */ case OSSA_ERR_PHY_ID_ALREADY_REGISTERED: /* do nothing */ TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_PHY_ID_ALREADY_REGISTERED or OSSA_ERR_PHY_ID_ALREADY_REGISTERED\n")); break; case OSSA_FAILURE_PORT_ID_OUT_OF_RANGE: /* fall through */ case OSSA_ERR_PORT_INVALID: TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_PORT_ID_OUT_OF_RANGE or OSSA_ERR_PORT_INVALID\n")); oneDeviceData->registered = agFALSE; break; case OSSA_FAILURE_PORT_NOT_VALID_STATE: /* fall through */ case OSSA_ERR_PORT_STATE_NOT_VALID: TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_PORT_NOT_VALID_STATE or OSSA_ERR_PORT_STATE_NOT_VALID\n")); TI_DBG2(("ossaDeviceRegistrationCB: did %d pid %d\n", oneDeviceData->id, onePortContext->id)); oneDeviceData->registered = agFALSE; /* transient period between link up and link down/port recovery */ onePortContext->Transient = agTRUE; if (onePortContext->valid == agTRUE && (oneDeviceData->valid == agTRUE || oneDeviceData->valid2 == agTRUE)) { TI_DBG1(("ossaDeviceRegistrationCB: retries regisration\n")); #ifdef REMOVED //temp; setting MCN to tdsaAllShared->MCN oneDeviceData->agDeviceInfo.flag = oneDeviceData->agDeviceInfo.flag | (tdsaAllShared->MCN << 
16); //end temp #endif saRegisterNewDevice( /* ossaDeviceRegistrationCB */ agRoot, &oneDeviceData->agContext, 0, &oneDeviceData->agDeviceInfo, onePortContext->agPortContext, 0 ); } else if (oneDeviceData->directlyAttached == agTRUE && DEVICE_IS_SATA_DEVICE(oneDeviceData)) { TI_DBG1(("ossaDeviceRegistrationCB: directly attached SATA, put back into free list\n")); tdsaDeviceDataReInit(tiRoot, oneDeviceData); tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK); TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList)); tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK); } break; case OSSA_FAILURE_DEVICE_TYPE_NOT_VALID: /* fall through */ case OSSA_ERR_DEVICE_TYPE_NOT_VALID: TI_DBG1(("ossaDeviceRegistrationCB: OSSA_FAILURE_DEVICE_TYPE_NOT_VALID or OSSA_ERR_DEVICE_TYPE_NOT_VALID\n")); oneDeviceData->registered = agFALSE; break; default: TI_DBG1(("ossaDeviceRegistrationCB: wrong. default status is %d\n", status)); break; } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Ye"); return; #endif } /*****************************************************************************/ /*! \brief ossaDeregisterDeviceHandleCB * * * Purpose: This routine is called by lower layer to corresponding to * saDeregisterDeviceHandle() * * \param agRoot: Pointer to chip/driver Instance. * \param agDevHandle: Pointer to the assigned device handle for the * registered device. * \param status: status * * * \return None. 
* */ /*****************************************************************************/ osGLOBAL void ossaDeregisterDeviceHandleCB( agsaRoot_t *agRoot, agsaContext_t *agContext, agsaDevHandle_t *agDevHandle, bit32 status ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdsaDeviceData_t *oneDeviceData = agNULL; tdsaPortContext_t *onePortContext = agNULL; agsaEventSource_t *eventSource; bit32 HwAckSatus; bit32 PhyID; #ifdef FDS_DM dmRoot_t *dmRoot = &(tdsaAllShared->dmRoot); dmPortContext_t *dmPortContext = agNULL; dmPortInfo_t dmPortInfo; bit32 DMstatus = DM_RC_FAILURE; #endif #ifdef FDS_SM smRoot_t *smRoot = &(tdsaAllShared->smRoot); #endif TI_DBG3(("ossaDeregisterDeviceHandleCB: start\n")); smTraceFuncEnter(hpDBG_VERY_LOUD,"Yf"); if (status == OSSA_ERR_DEVICE_HANDLE_INVALID) { /* there is no device handle to process */ TI_DBG2(("ossaDeregisterDeviceHandleCB: OSSA_ERR_DEVICE_HANDLE_INVALID\n")); return; } oneDeviceData = (tdsaDeviceData_t *)agDevHandle->osData; onePortContext = oneDeviceData->tdPortContext; #ifdef FDS_DM dmPortContext = &(onePortContext->dmPortContext); #endif if (oneDeviceData->valid == agFALSE && oneDeviceData->valid2 == agFALSE && oneDeviceData->DeviceType == TD_DEFAULT_DEVICE && onePortContext->valid == agTRUE) { TI_DBG2(("ossaDeregisterDeviceHandleCB: transient did %d\n", oneDeviceData->id)); return; } if (onePortContext != agNULL) { TI_DBG2(("ossaDeregisterDeviceHandleCB: pid %d registeredNumDevice %d\n", onePortContext->id, onePortContext->RegisteredDevNums)); } switch (status) { case OSSA_SUCCESS: TI_DBG3(("ossaDeregisterDeviceHandleCB: Success\n")); if (onePortContext == agNULL) { TI_DBG1(("ossaDeregisterDeviceHandleCB: onePortContext is NULL, wrong!\n")); return; } /* port is going down */ if (onePortContext->valid == agFALSE) { if 
(!(oneDeviceData->valid == agFALSE && oneDeviceData->valid2 == agFALSE && oneDeviceData->DeviceType == TD_DEFAULT_DEVICE)) { /* remove oneDevice from MainLink */ TI_DBG2(("ossaDeregisterDeviceHandleCB: delete from MainLink\n")); #ifdef FDS_SM if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) { TI_DBG1(("ossaDeregisterDeviceHandleCB: did %d calling smDeregisterDevice\n", oneDeviceData->id)); smDeregisterDevice(smRoot, oneDeviceData->agDevHandle, &(oneDeviceData->smDeviceHandle)); } #endif tdsaDeviceDataReInit(tiRoot, oneDeviceData); osti_memset(&(oneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t)); tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK); TDLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink)); TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList)); tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK); } /* for portcontext */ PhyID = onePortContext->eventPhyID; TI_DBG3(("ossaDeregisterDeviceHandleCB: PhyID %d\n", PhyID)); onePortContext->RegisteredDevNums--; /* check if valid in tdsaAllShared and the last registered device in a portcontext; if so, call saHwEventAck() */ if (tdsaAllShared->eventSource[PhyID].EventValid == agTRUE && onePortContext->RegisteredDevNums == 0 && PhyID != 0xFF ) { TI_DBG2(("ossaDeregisterDeviceHandleCB: calling saHwEventAck\n")); eventSource = &(tdsaAllShared->eventSource[PhyID].Source); HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaDeregisterDeviceHandleCB: failing in saHwEventAck; status %d\n", HwAckSatus)); } /* toggle */ tdsaAllShared->eventSource[PhyID].EventValid = agFALSE; #ifdef FDS_DM if (onePortContext->UseDM == agTRUE) { TI_DBG1(("ossaDeregisterDeviceHandleCB: calling dmDestroyPort\n")); /* setup dmPortInfo */ PORTINFO_PUT_SAS_REMOTE_ADDRESSLO(&dmPortInfo, onePortContext->sasRemoteAddressLo); 
PORTINFO_PUT_SAS_REMOTE_ADDRESSHI(&dmPortInfo, onePortContext->sasRemoteAddressHi); PORTINFO_PUT_SAS_LOCAL_ADDRESSLO(&dmPortInfo, onePortContext->sasLocalAddressLo); PORTINFO_PUT_SAS_LOCAL_ADDRESSHI(&dmPortInfo, onePortContext->sasLocalAddressHi); DMstatus = dmDestroyPort(dmRoot, dmPortContext, &dmPortInfo); if (DMstatus != DM_RC_SUCCESS) { TI_DBG1(("ossaDeregisterDeviceHandleCB: dmDestroyPort failed!!! 0x%x\n", DMstatus)); } } #endif tdsaPortContextReInit(tiRoot, onePortContext); /* put all devices belonging to the onePortContext back to the free link */ tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_DEQUEUE_THIS(&(onePortContext->MainLink)); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->FreeLink), &(tdsaAllShared->FreePortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } else if (tdsaAllShared->eventSource[PhyID].EventValid == NO_ACK && onePortContext->RegisteredDevNums == 0 ) { TI_DBG2(("ossaDeregisterDeviceHandleCB: NO ACK case\n")); #ifdef FDS_DM if (onePortContext->UseDM == agTRUE) { TI_DBG1(("ossaDeregisterDeviceHandleCB: calling dmDestroyPort\n")); /* setup dmPortInfo */ PORTINFO_PUT_SAS_REMOTE_ADDRESSLO(&dmPortInfo, onePortContext->sasRemoteAddressLo); PORTINFO_PUT_SAS_REMOTE_ADDRESSHI(&dmPortInfo, onePortContext->sasRemoteAddressHi); PORTINFO_PUT_SAS_LOCAL_ADDRESSLO(&dmPortInfo, onePortContext->sasLocalAddressLo); PORTINFO_PUT_SAS_LOCAL_ADDRESSHI(&dmPortInfo, onePortContext->sasLocalAddressHi); DMstatus = dmDestroyPort(dmRoot, dmPortContext, &dmPortInfo); if (DMstatus != DM_RC_SUCCESS) { TI_DBG1(("ossaDeregisterDeviceHandleCB: dmDestroyPort failed!!! 
0x%x\n", DMstatus)); } } #endif tdsaPortContextReInit(tiRoot, onePortContext); /* put all devices belonging to the onePortContext back to the free link */ tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); TDLIST_DEQUEUE_THIS(&(onePortContext->MainLink)); TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->FreeLink), &(tdsaAllShared->FreePortContextList)); tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } else { if (PhyID < TD_MAX_NUM_PHYS) { TI_DBG3(("ossaDeregisterDeviceHandleCB: pid %d eventvalid %d registeredNumDevice %d\n", onePortContext->id, tdsaAllShared->eventSource[PhyID].EventValid , onePortContext->RegisteredDevNums)); } else { TI_DBG3(("ossaDeregisterDeviceHandleCB: pid %d registeredNumDevice %d wrong phyid %d\n", onePortContext->id, onePortContext->RegisteredDevNums, PhyID)); } } } else { PhyID = onePortContext->eventPhyID; TI_DBG3(("ossaDeregisterDeviceHandleCB: PhyID %d\n", PhyID)); onePortContext->RegisteredDevNums--; #ifdef FDS_SM oneDeviceData->satDevData.IDDeviceValid = agFALSE; if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) { smDeregisterDevice(smRoot, oneDeviceData->agDevHandle, &(oneDeviceData->smDeviceHandle)); } #endif /* check if valid in tdsaAllShared and the last registered device in a portcontext; if so, call saHwEventAck() */ if (tdsaAllShared->eventSource[PhyID].EventValid == agTRUE && onePortContext->RegisteredDevNums == 0 && PhyID != 0xFF ) { TI_DBG2(("ossaDeregisterDeviceHandleCB: calling saHwEventAck\n")); eventSource = &(tdsaAllShared->eventSource[PhyID].Source); HwAckSatus = saHwEventAck( agRoot, agNULL, /* agContext */ 0, eventSource, /* agsaEventSource_t */ 0, 0 ); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("ossaDeregisterDeviceHandleCB: failing in saHwEventAck; status %d\n", HwAckSatus)); } /* toggle */ tdsaAllShared->eventSource[PhyID].EventValid = agFALSE; } #ifdef INITIATOR_DRIVER else if (onePortContext->RegisteredDevNums == 1) { TI_DBG1(("ossaDeregisterDeviceHandleCB: all devices have been 
deregistered except directly attached EXP\n")); /* qqqqq If broadcast has been seen, call incremental discovery*/ if (onePortContext->DiscFailNSeenBC == agTRUE) { TI_DBG1(("ossaDeregisterDeviceHandleCB: calling dmDiscover, incremental, pid %d\n", onePortContext->id)); dmDiscover(dmRoot, dmPortContext, DM_DISCOVERY_OPTION_INCREMENTAL_START); onePortContext->DiscFailNSeenBC = agFALSE; } else { TI_DBG1(("ossaDeregisterDeviceHandleCB: not calling dmDiscover\n")); /* qqqqq needs to change discovery state to onePortContext->DMDiscoveryState == dmDiscCompleted in dmQueryDiscovery change the discovery state from dmDiscFailed to dmDiscCompleted */ dmResetFailedDiscovery(dmRoot, dmPortContext); } } #endif else { if (PhyID < TD_MAX_NUM_PHYS) { TI_DBG3(("ossaDeregisterDeviceHandleCB: pid %d eventvalid %d registeredNumDevice %d\n", onePortContext->id, tdsaAllShared->eventSource[PhyID].EventValid , onePortContext->RegisteredDevNums)); } else { TI_DBG3(("ossaDeregisterDeviceHandleCB: pid %d registeredNumDevice %d wrong phyid %d\n", onePortContext->id, onePortContext->RegisteredDevNums, PhyID)); } } } break; case OSSA_INVALID_HANDLE: TI_DBG1(("ossaDeregisterDeviceHandleCB: OSSA_INVALID_HANDLE\n")); break; #ifdef REMOVED case OSSA_FAILURE_DEVICE_DIRECT_ATTACH: TI_DBG1(("ossaDeregisterDeviceHandleCB: OSSA_FAILURE_DEVICE_DIRECT_ATTACH\n")); break; #endif case OSSA_ERR_DEVICE_HANDLE_INVALID: TI_DBG1(("ossaDeregisterDeviceHandleCB: OSSA_ERR_DEVICE_HANDLE_INVALID\n")); break; case OSSA_ERR_DEVICE_BUSY: TI_DBG1(("ossaDeregisterDeviceHandleCB: OSSA_ERR_DEVICE_BUSY\n")); break; default: TI_DBG1(("ossaDeregisterDeviceHandleCB: unknown status 0x%x\n", status)); break; } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yf"); return; } /*****************************************************************************/ /*! \brief ossaDeviceHandleRemovedEvent * * * Purpose: This routine is called by lower layer to notify the device removal * * * \param agRoot: Pointer to chip/driver Instance. 
 *  \param   agDevHandle:   Pointer to the assigned device handle for the
 *                          registered device.
 *  \param   agPortContext: Pointer to this instance of port context.
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaDeviceHandleRemovedEvent (
  agsaRoot_t        *agRoot,
  agsaDevHandle_t   *agDevHandle,
  agsaPortContext_t *agPortContext
  )
{
#ifdef NOT_YET
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
#endif
  tdsaPortContext_t *onePortContext = agNULL;
  tdsaDeviceData_t  *oneDeviceData = (tdsaDeviceData_t *)agDevHandle->osData;

  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yg");
  TI_DBG2(("ossaDeviceHandleRemovedEvent: start\n"));
  if (oneDeviceData == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleRemovedEvent: Wrong! oneDeviceData is NULL\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yg");
    return;
  }
  TI_DBG2(("ossaDeviceHandleRemovedEvent: did %d\n", oneDeviceData->id));
  /* handle is gone at the lower layer; mark the device as not registered */
  oneDeviceData->registered = agFALSE;
  onePortContext = (tdsaPortContext_t *)agPortContext->osData;
  if (onePortContext == agNULL)
  {
    TI_DBG1(("ossaDeviceHandleRemovedEvent: Wrong! onePortContext is NULL\n"));
    smTraceFuncExit(hpDBG_VERY_LOUD, 'b', "Yg");
    return;
  }
  TI_DBG2(("ossaDeviceHandleRemovedEvent: pid %d\n", onePortContext->id));
  /* one fewer registered device on this port context */
  onePortContext->RegisteredDevNums--;
#ifdef NOT_YET
  /* not yet enabled: would notify the OS layer of the device removal */
  ostiInitiatorEvent(
                     tiRoot,
                     onePortContext->tiPortalContext,
                     agNULL,
                     tiIntrEventTypeDeviceChange,
                     tiDeviceRemoval,
                     agNULL
                     );
#endif
  smTraceFuncExit(hpDBG_VERY_LOUD, 'c', "Yg");
  return;
}

#ifdef SPC_ENABLE_PROFILE
/*****************************************************************************/
/*! \brief ossaFwProfileCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saFwProfile()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    into saFwProfile()
 *  \param status:    status
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaFwProfileCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status,
  bit32         len)
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;

  TI_DBG2(("ossaFwProfileCB: start\n"));
  /* each arm only logs the completion code */
  switch (status)
  {
  case AGSA_RC_SUCCESS:
  {
    TI_DBG2(("ossaFwProfileCB: SUCCESS\n"));
    break;
  }
  case AGSA_RC_FAILURE:
  {
    TI_DBG1(("ossaFwProfileCB: FAIL\n"));
    break;
  }
  default:
  {
    TI_DBG1(("ossaFwProfileCB: !!! default, status %d\n", status));
    break;
  }
  }
  /* complete the FW-profile IOCTL back to the OS layer */
  ostiFWProfileIOCTLRsp(tiRoot, status, len);
  return;
}
#endif
/*****************************************************************************/
/*! \brief ossaFwFlashUpdateCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saFwFlashUpdate()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    into saFwFlashUpdate()
 *  \param status:    status
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaFwFlashUpdateCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status
  )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;

  TI_DBG2(("ossaFwFlashUpdateCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yh");
  /* decode the flash-update completion code; each arm only logs */
  switch (status)
  {
  case OSSA_FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
  {
    TI_DBG2(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_IN_PROGRESS:
  {
    TI_DBG2(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_IN_PROGRESS\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_HDR_ERR:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_HDR_ERR\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_OFFSET_ERR:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_OFFSET_ERR\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_CRC_ERR:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_CRC_ERR\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_LENGTH_ERR:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_LENGTH_ERR\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_HW_ERR:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_HW_ERR\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_DNLD_NOT_SUPPORTED:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_DISABLED:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_DISABLED\n"));
    break;
  }
  case OSSA_FLASH_FWDNLD_DEVICE_UNSUPPORT:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_FWDNLD_DEVICE_UNSUPPORT\n"));
    break;
  }
  case OSSA_MPI_ERR_IO_RESOURCE_UNAVAILABLE:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_MPI_ERR_IO_RESOURCE_UNAVAILABLE\n"));
    break;
  }
  case OSSA_FLASH_UPDATE_HMAC_ERR:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: OSSA_FLASH_UPDATE_HMAC_ERR\n"));
    break;
  }
  default:
  {
    TI_DBG1(("ossaFwFlashUpdateCB: !!! default, status 0x%X\n", status));
    break;
  }
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yh");
  /* complete the management IOCTL back to the OS layer with the raw status */
  ostiCOMMgntIOCTLRsp(tiRoot, status);
  return;
}

/* Log-only completion for saFlashExtExecute(); no OS-layer notification. */
GLOBAL void ossaFlashExtExecuteCB(
  agsaRoot_t             *agRoot,
  agsaContext_t          *agContext,
  bit32                  status,
  bit32                  command,
  agsaFlashExtResponse_t *agFlashExtRsp)
{
  TI_DBG1(("ossaFlashExtExecuteCB: command 0x%X status 0x%X\n",command, status));
}

/*****************************************************************************/
/*! \brief ossaGetNVMDResponseCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saGetNVMDCommand()
 *
 *  \param agRoot:          Pointer to chip/driver Instance.
 *  \param agContext:       Context of the operation originally passed
 *                          into saGetNVMDCommand()
 *  \param status:          status
 *  \param indirectPayload: The value passed in agsaNVMDData_t when
 *                          calling saGetNVMDCommand()
 *  \param agInfoLen:       the length of NVMD information
 *  \param agFrameHandle:   handler of NVMD information
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaGetNVMDResponseCB(
  agsaRoot_t        *agRoot,
  agsaContext_t     *agContext,
  bit32             status,
  bit8              indirectPayload,
  bit32             agInfoLen,
  agsaFrameHandle_t agFrameHandle
  )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;

  TI_DBG2(("ossaGetNVMDResponseCB: start\n"));
  TI_DBG2(("ossaGetNVMDResponseCB: agInfoLen %d\n", agInfoLen));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yi");
  if (status == OSSA_SUCCESS)
  {
    TI_DBG2(("ossaGetNVMDResponseCB: Success status\n"));
    /* only a direct (indirectPayload == 0) response carries data in the frame */
    if (indirectPayload == 0 && agInfoLen != 0)
    {
      TI_DBG2(("ossaGetNVMDResponseCB: direct\n"));
      tdhexdump("ossaGetNVMDResponseCB", (bit8 *)agFrameHandle, agInfoLen);
    }
  }
  else
  {
    TI_DBG1(("ossaGetNVMDResponseCB: Status 0x%x\n", status));
  }

  if (indirectPayload == 0)
  {
    TI_DBG2(("ossaGetNVMDResponseCB: direct\n"));
  }
  else
  {
    TI_DBG2(("ossaGetNVMDResponseCB: indirect\n"));
  }

  /* complete the get-NVMD IOCTL back to the OS layer */
  ostiGetNVMDIOCTLRsp(tiRoot, status);
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yi");
  return;
}

/*****************************************************************************/
/*! \brief ossaSetNVMDResponseCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saSetNVMDCommand()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    into saSetNVMDCommand()
 *  \param status:    status
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaSetNVMDResponseCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status
  )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;

  TI_DBG2(("ossaSetNVMDResponseCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yj");
  if (status == OSSA_SUCCESS)
  {
    TI_DBG2(("ossaSetNVMDResponseCB: success\n"));
  }
  else
  {
    TI_DBG1(("ossaSetNVMDResponseCB: fail or undefined staus %d\n", status));
  }
  /* complete the set-NVMD IOCTL back to the OS layer */
  ostiSetNVMDIOCTLRsp(tiRoot, status);
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yj");
  return;
}

#ifdef REMOVED
/*****************************************************************************/
/*! \brief ossaGetVPDResponseCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saGetVPDCommand()
 *
 *  \param agRoot:        Pointer to chip/driver Instance.
 *  \param agContext:     Context of the operation originally passed
 *                        into saGetVPDCommand()
 *  \param status:        status
 *  \param agInfoLen:     the length of VPD information
 *  \param agFrameHandle: handler of VPD information
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaGetVPDResponseCB(
  agsaRoot_t        *agRoot,
  agsaContext_t     *agContext,
  bit32             status,
  bit8              indirectMode,
  bit32             agInfoLen,
  agsaFrameHandle_t agFrameHandle
  )
{
  bit8 VPDData[48];

  TI_DBG2(("ossaGetVPDResponseCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yk");
  if (status == OSSA_SUCCESS)
  {
    TI_DBG2(("ossaGetVPDResponseCB: agInfoLen %d\n", agInfoLen));
    osti_memset(VPDData, 0, 48);
    /* We can read only in case of Direct */
    saFrameReadBlock(agRoot, agFrameHandle, 0, VPDData, agInfoLen);
    tdhexdump("ossaGetVPDResponseCB", (bit8 *)VPDData, agInfoLen);
    /* callback osti.... */
  }
  else
  {
    TI_DBG1(("ossaGetVPDResponseCB: fail or undefined staus %d\n", status));
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yk");
  return;
}

/*****************************************************************************/
/*! \brief ossaSetVPDResponseCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saSetVPDCommand()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    into saSetVPDCommand()
 *  \param status:    status
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaSetVPDResponseCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status
  )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;

  TI_DBG2(("ossaSetVPDResponseCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yl");
  if (status == OSSA_SUCCESS)
  {
    TI_DBG2(("ossaSetVPDResponseCB: success\n"));
    /* complete the VPD-set IOCTL back to the OS layer */
    ostiCOMMgntVPDSetIOCTLRsp(tiRoot, 0);
    /* callback osti..... */
#ifdef VPD_TESTING
    /* temporary to test saSetVPDCommand() and saGetVPDCommand */
    tdsaVPDGet(tiRoot);
#endif
  }
  else
  {
    TI_DBG1(("ossaSetVPDResponseCB: fail or undefined staus %d\n", status));
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yl");
  return;
}
#endif

/*****************************************************************************/
/*! \brief ossaEchoCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saEchoCommand()
 *
 *  \param agRoot:      Pointer to chip/driver Instance.
 *  \param agContext:   Context of the operation originally passed
 *                      into saEchoCommand()
 *  \param echoPayload: Pointer to the echo payload
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaEchoCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  void          *echoPayload
  )
{
#ifdef ECHO_TESTING
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;
  bit8 payload[56];
#endif

  TI_DBG2(("ossaEchoCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Ym");

  /* dumping received echo payload is 56 bytes */
  tdhexdump("ossaEchoCB: echoPayload", (bit8 *)(echoPayload), 56);
#ifdef ECHO_TESTING
  /* temporary to test saEchoCommand() */
  /* new echo payload; gEcho cycles 0..0xFF so successive echoes differ */
  osti_memset(payload,0, sizeof(payload));
  payload[0] = gEcho;
  payload[55] = gEcho;
  TI_DBG2(("ossaEchoCB: gEcho %d\n", gEcho));
  saEchoCommand(agRoot, agNULL, tdsaRotateQnumber(tiRoot, agNULL), (void *)&payload);
  if (gEcho == 0xFF)
  {
    gEcho = 0;
  }
  else
  {
    gEcho++;
  }
#endif
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Ym");
  return;
}

/*****************************************************************************/
/*! \brief ossaGpioResponseCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saGpioEventSetup(), saGpioPinSetup(), saGpioRead(), or
 *           saGpioWrite()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    in.
 *  \param status:             GPIO operation completion status
 *  \param gpioReadValue:      a bit map containing the corresponding
 *                             value for each GPIO pin.
 *  \param gpioPinSetupInfo:   Pointer to agsaGpioPinSetupInfo_t structure
 *                             describing the GPIO pin setup
 *  \param gpioEventSetupInfo: Pointer to agsaGpioEventSetupInfo_t structure
 *                             describing the GPIO event setups
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaGpioResponseCB(
  agsaRoot_t               *agRoot,
  agsaContext_t            *agContext,
  bit32                    status,
  bit32                    gpioReadValue,
  agsaGpioPinSetupInfo_t   *gpioPinSetupInfo,
  agsaGpioEventSetupInfo_t *gpioEventSetupInfo
  )
{
  TI_DBG2(("ossaGpioResponseCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yn");
  if (status == OSSA_SUCCESS)
  {
    TI_DBG2(("ossaGpioResponseCB: Success\n"));
    /* printing gpioReadValue, agsaGpioPinSetupInfo_t and agsaGpioEventSetupInfo_t */
    TI_DBG2(("ossaGpioResponseCB: gpioReadValue 0x%x\n", gpioReadValue));
    TI_DBG2(("ossaGpioResponseCB: PinSetupInfo gpioInputEnabled 0x%x\n", gpioPinSetupInfo->gpioInputEnabled));
    TI_DBG2(("ossaGpioResponseCB: PinSetupInfo gpioTypePart1 0x%x\n", gpioPinSetupInfo->gpioTypePart1));
    TI_DBG2(("ossaGpioResponseCB: PinSetupInfo gpioTypePart2 0x%x\n", gpioPinSetupInfo->gpioTypePart2));
    TI_DBG2(("ossaGpioResponseCB: EventSetupInfo gpioEventLevel 0x%x\n", gpioEventSetupInfo->gpioEventLevel));
    TI_DBG2(("ossaGpioResponseCB: EventSetupInfo gpioEventRisingEdge 0x%x\n", gpioEventSetupInfo->gpioEventRisingEdge));
    TI_DBG2(("ossaGpioResponseCB: EventSetupInfo gpioEventFallingEdge 0x%x\n", gpioEventSetupInfo->gpioEventFallingEdge));
  }
  else
  {
    TI_DBG1(("ossaGpioResponseCB: Failure\n"));
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yn");
  return;
}

/*****************************************************************************/
/*!
 *  \brief ossaGpioEvent
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saGpioEventSetup(), saGpioPinSetup(), saGpioRead(), or
 *           saGpioWrite()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param gpioEvent: a bit map that indicates which GPIO
 *                    input pins have generated the event.
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaGpioEvent(
  agsaRoot_t *agRoot,
  bit32      gpioEvent
  )
{
  /* log-only: the event bit map is reported but not acted upon */
  TI_DBG2(("ossaGpioEvent: start\n"));
  TI_DBG2(("ossaGpioEvent: gpioEvent 0x%x\n", gpioEvent));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yo");
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yo");
  return;
}

/*****************************************************************************/
/*! \brief ossaSASDiagExecuteCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saSASDiagExecute()
 *
 *  \param agRoot:     Pointer to chip/driver Instance.
 *  \param agContext:  Context of the operation originally passed
 *                     in.
 *  \param status:     Diagnostic operation completion status
 *  \param command:    SAS diagnostic command field in agsaSASDiagExecute_t
 *                     structure passed in saSASDiagExecute().
 *  \param reportData: Report Diagnostic Data
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaSASDiagExecuteCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status,
  bit32         command,
  bit32         reportData)
{
  /* log-only completion */
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yq");
  TI_DBG2(("ossaSASDiagExecuteCB: start\n"));
  TI_DBG2(("ossaSASDiagExecuteCB: status %d\n", status));
  TI_DBG2(("ossaSASDiagExecuteCB: command %d\n", command));
  TI_DBG2(("ossaSASDiagExecuteCB: reportData %d\n", reportData));
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yq");
  return;
}

/*****************************************************************************/
/*!
 *  \brief ossaSASDiagStartEndCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saSASDiagExecute()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    in.
 *  \param status:    Diagnostic operation completion status
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaSASDiagStartEndCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status)
{
  /* log-only completion */
  TI_DBG2(("ossaSASDiagStartEndCB: start\n"));
  TI_DBG2(("ossaSASDiagStartEndCB: status %d\n", status));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Yr");
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yr");
  return;
}

/*****************************************************************************/
/*! \brief ossaReconfigSASParamsCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saReconfigSASParams()
 *
 *  \param agRoot:      Pointer to chip/driver Instance.
 *  \param agContext:   Context of the operation originally passed
 *                      in saReconfigSASParams().
 *  \param status:      saReconfigSASParams() completion status
 *  \param agSASConfig: Pointer to the data structure agsaSASReconfig_t
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
osGLOBAL void ossaReconfigSASParamsCB(
  agsaRoot_t        *agRoot,
  agsaContext_t     *agContext,
  bit32             status,
  agsaSASReconfig_t *agSASConfig)
{
  /* log-only completion */
  TI_DBG2(("ossaReconfigSASParamsCB: status %d\n", status));
  return;
}

/* Log-only completion for the PCIe diagnostic command; dumps the response
   words reported by the lower layer. */
GLOBAL void ossaPCIeDiagExecuteCB(
  agsaRoot_t             *agRoot,
  agsaContext_t          *agContext,
  bit32                  status,
  bit32                  command,
  agsaPCIeDiagResponse_t *resp )
{
  TI_DBG2(("ossaPCIeDiagExecuteCB: status %d\n", status));
  TI_DBG2(("ossaPCIeDiagExecuteCB: ERR_BLKH 0x%X\n",resp->ERR_BLKH ));
  TI_DBG2(("ossaPCIeDiagExecuteCB: ERR_BLKL 0x%X\n",resp->ERR_BLKL ));
  TI_DBG2(("ossaPCIeDiagExecuteCB: DWord8 0x%X\n",resp->DWord8 ));
  TI_DBG2(("ossaPCIeDiagExecuteCB: DWord9 0x%X\n",resp->DWord9 ));
  TI_DBG2(("ossaPCIeDiagExecuteCB: DWord10 0x%X\n",resp->DWord10 ));
  TI_DBG2(("ossaPCIeDiagExecuteCB: DWord11 0x%X\n",resp->DWord11 ));
  TI_DBG2(("ossaPCIeDiagExecuteCB: DIF_ERR 0x%X\n",resp->DIF_ERR ));
  return;
}

#ifndef BIOS
/* SGPIO completion: logs the SMP response fields and completes the SGPIO
   IOCTL back to the OS layer. */
GLOBAL void ossaSGpioCB(
  agsaRoot_t             *agRoot,
  agsaContext_t          *agContext,
  agsaSGpioReqResponse_t *pSgpioResponse
  )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t         *tiRoot = (tiRoot_t *)osData->tiRoot;

  TI_DBG2(("ossaSGpioCB: smpFrameType: 0x%02x \n", pSgpioResponse->smpFrameType));
  TI_DBG2(("ossaSGpioCB: function: 0x%02x \n", pSgpioResponse->function));
  TI_DBG2(("ossaSGpioCB: functionResult: 0x%02x \n", pSgpioResponse->functionResult));

  tdhexdump("ossaSGpioCB Response", (bit8 *)pSgpioResponse, sizeof(agsaSGpioReqResponse_t));

  ostiSgpioIoctlRsp(tiRoot, pSgpioResponse);
}
#endif /* BIOS */

/*****************************************************************************/
/*! \brief ossaLogDebugString
 *
 *
 *  Purpose: This routine is called by lower layer to log.
 *
 *  \param agRoot:  Pointer to chip/driver Instance.
 *  \param level:  Detail of information desired.
 *  \param string: Pointer to the character string.
 *  \param ptr1:   First pointer value.
 *  \param ptr2:   Second pointer value.
 *  \param value1: First 32-bit value related to the specific information.
 *  \param value2: Second 32-bit value related to the specific information.
 *
 *  \return None.
 *
 */
/*****************************************************************************/
GLOBAL void ossaLogDebugString(
  agsaRoot_t *agRoot,
  bit32      level,
  char       *string,
  void       *ptr1,
  void       *ptr2,
  bit32      value1,
  bit32      value2
  )
{
#if defined(SALLSDK_DEBUG)
  /* forwarded to the TI debug facility only in SALLSDK_DEBUG builds */
  TIDEBUG_MSG(gLLDebugLevel, level, ("%s %p %p %d %d\n", string, ptr1, ptr2, value1, value2));
#endif
  return;
}

/*****************************************************************************/
/*! \brief ossaHwEventAckCB
 *
 *
 *  Purpose: This routine is called by lower layer to corresponding to
 *           saHwEventAck()
 *
 *  \param agRoot:    Pointer to chip/driver Instance.
 *  \param agContext: Context of the operation originally passed
 *                    in.
 *  \param status:    Status
 *
 *
 *  \return None.
 *
 */
/*****************************************************************************/
GLOBAL void ossaHwEventAckCB(
  agsaRoot_t    *agRoot,
  agsaContext_t *agContext,
  bit32         status
  )
{
  TI_DBG3(("ossaHwEventAckCB: start\n"));
  smTraceFuncEnter(hpDBG_VERY_LOUD,"Ys");
  if (status == tiSuccess)
  {
    TI_DBG3(("ossaHwEventAckCB: SUCCESS status\n"));
  }
  else
  {
    /* on failure, bits 0-3 of status identify which part of the ack was rejected */
    TI_DBG1(("ossaHwEventAckCB: FAIL status 0x%X\n", status));
    TI_DBG1(("ossaHwEventAckCB: invalid event status bit0 %d\n", status & 0x01));
    TI_DBG1(("ossaHwEventAckCB: invalid phyid status bit1 %d\n", (status & 0x02) >> 1 ));
    TI_DBG1(("ossaHwEventAckCB: invalid portcontext status bit2 %d\n", (status & 0x04) >> 2));
    TI_DBG1(("ossaHwEventAckCB: invalid param0 status bit3 %d\n", (status & 0x08) >> 3));
  }
  smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Ys");
  return;
}

/*****************************************************************************/
/*!
\brief ossaGetTimeStampCB * * * Purpose: This routine is called by lower layer to corresponding to * saGetTimeStamp() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally passed * in. * \param timeStampLower: The controller lower 32-bit of internal time * stamp associated with event log. * \param timeStampUpper: The controller upper 32-bit of internal time * stamp associated with event log. * * * \return None. * */ /*****************************************************************************/ GLOBAL void ossaGetTimeStampCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 timeStampLower, bit32 timeStampUpper ) { smTraceFuncEnter(hpDBG_VERY_LOUD,"Yt"); TI_DBG4(("ossaGetTimeStampCB: start\n")); TI_DBG4(("ossaGetTimeStampCB: timeStampUpper 0x%x timeStampLower 0x%x\n", timeStampUpper, timeStampLower)); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yt"); return; } /*****************************************************************************/ /*! \brief ossaSMPAbortCB * * * Purpose: This routine is called by lower layer to corresponding to * saSMPAbort() * * \param agRoot: Pointer to chip/driver Instance. * \param agIORequest: This is the agIORequest parameter passed in * saSMPAbort() * \param status: Status of abort * * \return None. 
* */ /*****************************************************************************/ GLOBAL void ossaSMPAbortCB( agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, bit32 flag, bit32 status) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdIORequestBody_t *tdAbortIORequestBody = agNULL; tdsaDeviceData_t *oneDeviceData = agNULL; tiDeviceHandle_t *tiDeviceHandle = agNULL; TI_DBG4(("ossaSMPAbortCB: start\n")); TI_DBG4(("ossaSMPAbortCB: flag %d\n", flag)); TI_DBG4(("ossaSMPAbortCB: status %d\n", status)); smTraceFuncEnter(hpDBG_VERY_LOUD,"Yu"); tdAbortIORequestBody = (tdIORequestBody_t *)agIORequest->osData; if (tdAbortIORequestBody == agNULL) { TI_DBG1(("ossaSMPAbortCB: tdAbortIORequestBody is NULL warning!!!!\n")); return; } if (flag == 2) { /* abort per port */ TI_DBG2(("ossaSMPAbortCB: abort per port\n")); } else if (flag == 1) { TI_DBG2(("ossaSMPAbortCB: abort all\n")); tiDeviceHandle = (tiDeviceHandle_t *)tdAbortIORequestBody->tiDevHandle; if (tiDeviceHandle == agNULL) { TI_DBG1(("ossaSMPAbortCB: tiDeviceHandle is NULL warning!!!!\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); return; } oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; if (oneDeviceData == agNULL) { TI_DBG1(("ossaSMPAbortCB: oneDeviceData is NULL warning!!!!\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); return; } if (status == OSSA_IO_SUCCESS) { TI_DBG2(("ossaSMPAbortCB: OSSA_IO_SUCCESS\n")); /* clean up TD layer's IORequestBody */ TI_DBG3(("ossaSMPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG2(("ossaSMPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == 
OSSA_IO_NOT_VALID) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_NOT_VALID\n")); /* clean up TD layer's IORequestBody */ TI_DBG1(("ossaSMPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG1(("ossaSMPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_NO_DEVICE) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_NO_DEVICE\n")); /* clean up TD layer's IORequestBody */ TI_DBG1(("ossaSMPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG1(("ossaSMPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_ABORT_IN_PROGRESS) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_ABORT_IN_PROGRESS\n")); /* clean up TD layer's IORequestBody */ TI_DBG1(("ossaSMPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG1(("ossaSMPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #ifdef REMOVED else if (status == OSSA_IO_ABORT_DELAYED) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_ABORT_DELAYED\n")); /* clean up TD layer's IORequestBody */ TI_DBG1(("ossaSMPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG1(("ossaSMPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #endif else { TI_DBG1(("ossaSMPAbortCB: other status %d\n", status)); /* 
clean up TD layer's IORequestBody */ TI_DBG1(("ossaSMPAbortCB: calling saDeregisterDeviceHandle\n")); saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData)); TI_DBG1(("ossaSMPAbortCB: did %d\n", oneDeviceData->id)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } } else if (flag == 0) { TI_DBG2(("ossaSMPAbortCB: abort one\n")); if (status == OSSA_IO_SUCCESS) { TI_DBG2(("ossaSMPAbortCB: OSSA_IO_SUCCESS\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_NOT_VALID) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_NOT_VALID\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_NO_DEVICE) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_NO_DEVICE\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } else if (status == OSSA_IO_ABORT_IN_PROGRESS) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_ABORT_IN_PROGRESS\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #ifdef REMOVED else if (status == OSSA_IO_ABORT_DELAYED) { TI_DBG1(("ossaSMPAbortCB: OSSA_IO_ABORT_DELAYED\n")); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } #endif else { TI_DBG1(("ossaSMPAbortCB: other status %d\n", status)); ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } } else { TI_DBG1(("ossaSMPAbortCB: wrong flag %d\n", flag)); } smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yu"); return; } /*****************************************************************************/ /*! 
\brief ossaGeneralEvent * * * Purpose: This is the event notification for debugging purposes sent to * inform the OS layer of some general error related to a specific * inbound operation. * * \param agRoot: Pointer to chip/driver Instance. * \param status: Status associated with this event * \param msg: Pointer to controller specific command * massage that caused the error * * \return None. * */ /*****************************************************************************/ GLOBAL void ossaGeneralEvent( agsaRoot_t *agRoot, bit32 status, agsaContext_t *agContext, bit32 *msg) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; TI_DBG1(("ossaGeneralEvent: start\n")); TI_DBG1(("ossaGeneralEvent: status %d\n", status)); if(msg) { TI_DBG1(("ossaGeneralEvent: *msg %X\n", *msg)); } smTraceFuncEnter(hpDBG_VERY_LOUD,"Yv"); ostiGenEventIOCTLRsp(tiRoot, status); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yv"); return; } GLOBAL void ossaGetForensicDataCB ( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, agsaForensicData_t *forensicData) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; ostiGetForensicDataIOCTLRsp(tiRoot, status, forensicData); return; } #ifdef INITIATOR_DRIVER GLOBAL void ossaGetIOErrorStatsCB ( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, agsaIOErrorEventStats_t *stats) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; ostiGetIoErrorStatsIOCTLRsp(tiRoot, status, stats); } #else GLOBAL void ossaGetIOErrorStatsCB ( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, agsaIOErrorEventStats_t *stats) { } #endif GLOBAL void ossaGetIOEventStatsCB ( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, agsaIOErrorEventStats_t *stats) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; 
ostiGetIoEventStatsIOCTLRsp(tiRoot, status, stats); } /*****************************************************************************/ /*! \brief ossaGetRegisterDumpCB * * * Purpose: ossaGetRegisterDumpCB() is the response callback function * called by the LL Layer to indicate a response to * saGetRegisterDump() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally * passed into saGetRegisterDump() * \param status: status * * \return None. * */ /*****************************************************************************/ GLOBAL void ossaGetRegisterDumpCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; TI_DBG4(("ossaGetRegisterDumpCB: start\n")); TI_DBG4(("ossaGetRegisterDumpCB: status %d\n", status)); smTraceFuncEnter(hpDBG_VERY_LOUD,"Yw"); ostiRegDumpIOCTLRsp(tiRoot, status); smTraceFuncExit(hpDBG_VERY_LOUD, 'a', "Yw"); return; } /*****************************************************************************/ /*! \brief ossaSetDeviceStateCB * * * Purpose: ossaSetDeviceStateCB() is the response callback function * called by the LL Layer to indicate a response to * saSetDeviceState() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally * passed into saGetRegisterDump() * \param agDevHandle Pointer to the device handle of the device * \param status: status * \param newDeviceState: newly set device status * \param previousDeviceState: old device status * * \return None. 
* */ /*****************************************************************************/ GLOBAL void ossaSetDeviceStateCB( agsaRoot_t *agRoot, agsaContext_t *agContext, agsaDevHandle_t *agDevHandle, bit32 status, bit32 newDeviceState, bit32 previousDeviceState ) { tdsaDeviceData_t *oneDeviceData = agNULL; TI_DBG2(("ossaSetDeviceStateCB: start\n")); TI_DBG2(("ossaSetDeviceStateCB: status %d\n", status)); TI_DBG2(("ossaSetDeviceStateCB: newDeviceState %d\n", newDeviceState)); TI_DBG2(("ossaSetDeviceStateCB: previousDeviceState %d\n", previousDeviceState)); if (agDevHandle == agNULL) { TI_DBG4(("ossaSetDeviceStateCB: agDevHandle is NULL\n")); return; } oneDeviceData = (tdsaDeviceData_t *)agDevHandle->osData; if (oneDeviceData == agNULL) { TI_DBG1(("ossaSetDeviceStateCB: wrong; oneDeviceData is NULL\n")); } else { TI_DBG2(("ossaSetDeviceStateCB: did %d\n", oneDeviceData->id)); } return; } /*****************************************************************************/ /*! \brief ossaGetDeviceStateCB * * * Purpose: ossaGetDeviceStateCB() is the response callback function * called by the LL Layer to indicate a response to * saGetDeviceState() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally * passed into saGetRegisterDump() * \param agDevHandle Pointer to the device handle of the device * \param status: status * \param deviceState: device status * * \return None. * */ /*****************************************************************************/ GLOBAL void ossaGetDeviceStateCB( agsaRoot_t *agRoot, agsaContext_t *agContext, agsaDevHandle_t *agDevHandle, bit32 status, bit32 deviceState ) { TI_DBG4(("ossaGetDeviceStateCB: start\n")); TI_DBG4(("ossaGetDeviceStateCB: status %d\n", status)); TI_DBG4(("ossaGetDeviceStateCB: deviceState %d\n", deviceState)); return; } #ifdef INITIATOR_DRIVER /*****************************************************************************/ /*! 
\brief ossaIniSetDeviceInfoCB * * * Purpose: ossaIniSetDeviceInfoCB() is the response callback function * called by the LL Layer to indicate a response to * saSetDeviceInfo() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally * passed into saSetDeviceInfo() * \param agDevHandle Pointer to the device handle of the device * \param status: status * \param option: option parameter passed in saSetDeviceInfo() * \param param: param parameter passed in saSetDeviceInfo() * * \return None. * */ /*****************************************************************************/ osGLOBAL void ossaIniSetDeviceInfoCB( agsaRoot_t *agRoot, agsaContext_t *agContext, agsaDevHandle_t *agDevHandle, bit32 status, bit32 option, bit32 param ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; itdsaIni_t *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni; bit32 intContext = osData->IntContext; tdIORequestBody_t *tdIORequestBody = agNULL; agsaIORequest_t *agIORequest = agNULL; bit32 saStatus = AGSA_RC_FAILURE; bit8 devType_S_Rate; tdsaDeviceData_t *oneDeviceData = agNULL; TI_DBG4(("ossaIniSetDeviceInfoCB: start\n")); TI_DBG4(("ossaIniSetDeviceInfoCB: status 0x%x\n", status)); TI_DBG4(("ossaIniSetDeviceInfoCB: option 0x%x\n", option)); TI_DBG4(("ossaIniSetDeviceInfoCB: param 0x%x\n", param)); if (status != OSSA_SUCCESS) { TI_DBG1(("ossaIniSetDeviceInfoCB: status %d\n", status)); TI_DBG1(("ossaIniSetDeviceInfoCB: option 0x%x\n", option)); TI_DBG1(("ossaIniSetDeviceInfoCB: param 0x%x\n", param)); if (option == 32) /* set connection rate */ { TI_DBG1(("ossaIniSetDeviceInfoCB: IO failure\n")); agIORequest = (agsaIORequest_t *)agContext->osData; tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData; ostiInitiatorIOCompleted( tiRoot, 
tdIORequestBody->tiIORequest, tiIOFailed, tiDetailOtherError, agNULL, intContext ); } } if (agDevHandle == agNULL) { TI_DBG4(("ossaIniSetDeviceInfoCB: agDevHandle is NULL\n")); return; } oneDeviceData = (tdsaDeviceData_t *)agDevHandle->osData; if (oneDeviceData == agNULL) { TI_DBG1(("ossaIniSetDeviceInfoCB: wrong; oneDeviceData is NULL\n")); return; } else { TI_DBG4(("ossaIniSetDeviceInfoCB: did %d\n", oneDeviceData->id)); } /* retry IOs */ if (option == 32) /* set connection rate */ { TI_DBG1(("ossaIniSetDeviceInfoCB: set connection rate option\n")); agIORequest = (agsaIORequest_t *)agContext->osData; tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData; devType_S_Rate = oneDeviceData->agDeviceInfo.devType_S_Rate; devType_S_Rate = (devType_S_Rate & 0xF0) | (param >> 28); oneDeviceData->agDeviceInfo.devType_S_Rate = devType_S_Rate; TI_DBG1(("ossaIniSetDeviceInfoCB: new rate is 0x%x\n", DEVINFO_GET_LINKRATE(&oneDeviceData->agDeviceInfo))); if (oneDeviceData->valid == agTRUE && oneDeviceData->registered == agTRUE && oneDeviceData->tdPortContext != agNULL ) { saStatus = saSSPStart(agRoot, agIORequest, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, tdIORequestBody->agRequestType, &(tdIORequestBody->transport.SAS.agSASRequestBody), agNULL, &ossaSSPCompleted); if (saStatus == AGSA_RC_SUCCESS) { TI_DBG1(("ossaIniSetDeviceInfoCB: retried\n")); Initiator->NumIOsActive++; tdIORequestBody->ioStarted = agTRUE; tdIORequestBody->ioCompleted = agFALSE; return; } else { TI_DBG1(("ossaIniSetDeviceInfoCB: retry failed\n")); tdIORequestBody->ioStarted = agFALSE; tdIORequestBody->ioCompleted = agTRUE; ostiInitiatorIOCompleted( tiRoot, tdIORequestBody->tiIORequest, tiIOFailed, tiDetailOtherError, agNULL, intContext ); } } } return; } #endif /*****************************************************************************/ /*! 
\brief ossaSetDeviceInfoCB * * * Purpose: ossaSetDeviceInfoCB() is the response callback function * called by the LL Layer to indicate a response to * saSetDeviceInfo() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally * passed into saSetDeviceInfo() * \param agDevHandle Pointer to the device handle of the device * \param status: status * \param option: option parameter passed in saSetDeviceInfo() * \param param: param parameter passed in saSetDeviceInfo() * * \return None. * */ /*****************************************************************************/ GLOBAL void ossaSetDeviceInfoCB( agsaRoot_t *agRoot, agsaContext_t *agContext, agsaDevHandle_t *agDevHandle, bit32 status, bit32 option, bit32 param ) { tdsaDeviceData_t *oneDeviceData = agNULL; TI_DBG4(("ossaSetDeviceInfoCB: start\n")); TI_DBG4(("ossaSetDeviceInfoCB: status 0x%x\n", status)); TI_DBG4(("ossaSetDeviceInfoCB: option 0x%x\n", option)); TI_DBG4(("ossaSetDeviceInfoCB: param 0x%x\n", param)); if (status != OSSA_SUCCESS) { TI_DBG1(("ossaSetDeviceInfoCB: status %d\n", status)); TI_DBG1(("ossaSetDeviceInfoCB: option 0x%x\n", option)); TI_DBG1(("ossaSetDeviceInfoCB: param 0x%x\n", param)); } if (agDevHandle == agNULL) { TI_DBG4(("ossaSetDeviceInfoCB: agDevHandle is NULL\n")); return; } oneDeviceData = (tdsaDeviceData_t *)agDevHandle->osData; if (oneDeviceData == agNULL) { TI_DBG1(("ossaSetDeviceInfoCB: wrong; oneDeviceData is NULL\n")); } else { TI_DBG4(("ossaSetDeviceInfoCB: did %d\n", oneDeviceData->id)); } return; } /*****************************************************************************/ /*! \brief ossaGetDFEDataCB * * * Purpose: ossaGetDFEDataCB() is the response callback function * called by the LL Layer to indicate a response to * saGetDFEData() * * \param agRoot: Pointer to chip/driver Instance. 
* \param agContext: Context of the operation originally * passed into saGetDFEData() * \param status: status * \param agInfoLen: length in bytes of DFE data captured and transferred * * \return None. * */ /*****************************************************************************/ GLOBAL void ossaGetDFEDataCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 agInfoLen) { TI_DBG1(("ossaGetDFEDataCB: start\n")); TI_DBG1(("ossaGetDFEDataCB: status 0x%x agInfoLen 0x%x\n", status, agInfoLen)); return; } /*****************************************************************************/ /*! \brief ossaVhistCaptureCB * * * Purpose: ossaVhistCaptureCB() is the response callback function * called by the LL Layer to indicate a response to * saGetDFEData() * * \param agRoot: Pointer to chip/driver Instance. * \param agContext: Context of the operation originally * passed into () * \param status: status * \param len: length in bytes of Vis data captured and transferred * * \return None. 
* */ /*****************************************************************************/ void ossaVhistCaptureCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 len) { TI_DBG1(("ossaVhistCaptureCB: start\n")); TI_DBG1(("ossaVhistCaptureCB: status 0x%x agInfoLen 0x%x\n", status,len )); return; } GLOBAL void ossaOperatorManagementCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 eq ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tiEncryptPort_t encryptEventData; TI_DBG1(("ossaOperatorManagementCB: status 0x%x eq 0x%x\n", status, eq)); osti_memset(&encryptEventData, 0, sizeof(tiEncryptPort_t)); encryptEventData.encryptEvent = tiEncryptOperatorManagement; encryptEventData.subEvent = eq; encryptEventData.pData = agNULL; ostiPortEvent(tiRoot, tiEncryptOperation, status, &encryptEventData); } GLOBAL void ossaEncryptSelftestExecuteCB ( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 type, bit32 length, void *TestResult ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tiEncryptPort_t encryptEventData; TI_DBG1(("ossaEncryptSelftestExecuteCB: status 0x%x type 0x%x length 0x%x\n", status, type, length)); osti_memset(&encryptEventData, 0, sizeof(tiEncryptPort_t)); encryptEventData.encryptEvent = tiEncryptSelfTest; encryptEventData.subEvent = type; encryptEventData.pData = (void*)TestResult; ostiPortEvent(tiRoot, tiEncryptOperation, status, &encryptEventData); } GLOBAL void ossaGetOperatorCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 option, bit32 num, bit32 role, agsaID_t *id ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tiEncryptPort_t encryptEventData; TI_DBG1(("ossaGetOperatorCB: status 0x%x option 0x%x num 0x%x role 0x%x\n", status, option, num, role)); TI_DBG1(("ossaGetOperatorCB: agContext %p id 
%p\n",agContext,id)); osti_memset(&encryptEventData, 0, sizeof(tiEncryptPort_t)); encryptEventData.encryptEvent = tiEncryptGetOperator; encryptEventData.subEvent = option; encryptEventData.pData = agNULL; switch(status) { case OSSA_IO_SUCCESS: TI_DBG1(("ossaGetOperatorCB: OSSA_IO_SUCCESS option 0x%x\n", option)); if(option == 1) { TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[0], id->ID[1], id->ID[2], id->ID[3])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[4], id->ID[5], id->ID[6], id->ID[7])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[8], id->ID[9], id->ID[10],id->ID[11])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[12],id->ID[13],id->ID[14],id->ID[15])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[16],id->ID[17],id->ID[18],id->ID[19])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[20],id->ID[21],id->ID[22],id->ID[23])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x 0x%02x\n",id->ID[24],id->ID[25],id->ID[26],id->ID[27])); TI_DBG2(("ossaGetOperatorCB: 0x%02x 0x%02x 0x%02x\n", id->ID[28],id->ID[29],id->ID[30])); }else if(option == 2) { TI_DBG1(("ossaGetOperatorCB: number operators 0x%02x\n", num )); } encryptEventData.pData = id; break; case OSSA_MPI_ENC_ERR_UNSUPPORTED_OPTION: TI_DBG1(("ossaGetOperatorCB: OSSA_MPI_ENC_ERR_UNSUPPORTED_OPTION 0x%x\n",option)); break; case OSSA_MPI_ENC_ERR_ID_TRANSFER_FAILURE: TI_DBG1(("ossaGetOperatorCB: OSSA_MPI_ENC_ERR_ID_TRANSFER_FAILURE 0x%x\n",option)); break; default: TI_DBG1(("ossaGetOperatorCB: Unknown status 0x%x\n",status)); } ostiPortEvent(tiRoot, tiEncryptOperation, status, &encryptEventData); } GLOBAL void ossaSetOperatorCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, bit32 eq ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tiEncryptPort_t encryptEventData; TI_DBG1(("ossaSetOperatorCB: agContext %p 
status 0x%x eq 0x%x\n",agContext, status, eq)); osti_memset(&encryptEventData, 0, sizeof(tiEncryptPort_t)); encryptEventData.encryptEvent = tiEncryptSetOperator; encryptEventData.subEvent = 0; switch(status) { case OSSA_IO_SUCCESS: TI_DBG1(("ossaSetOperatorCB: OSSA_IO_SUCCESS\n")); encryptEventData.pData = agNULL; break; case OSSA_MPI_ENC_ERR_CONTROLLER_NOT_IDLE: TI_DBG1(("ossaSetOperatorCB: OSSA_MPI_ENC_ERR_CONTROLLER_NOT_IDLE\n")); break; case OSSA_MPI_ENC_OPERATOR_AUTH_FAILURE: TI_DBG1(("ossaSetOperatorCB: OSSA_MPI_ENC_OPERATOR_AUTH_FAILURE error qualifier 0x%x\n",eq)); break; case OSSA_MPI_ENC_OPERATOR_OPERATOR_ALREADY_LOGGED_IN: TI_DBG1(("ossaSetOperatorCB: OSSA_MPI_ENC_OPERATOR_OPERATOR_ALREADY_LOGGED_IN\n")); break; case OSSA_MPI_ENC_OPERATOR_ILLEGAL_PARAMETER: TI_DBG1(("ossaSetOperatorCB: OSSA_MPI_ENC_OPERATOR_ILLEGAL_PARAMETER\n")); break; case OSSA_MPI_ENC_ERR_UNSUPPORTED_OPTION: TI_DBG1(("ossaSetOperatorCB: OSSA_MPI_ENC_ERR_UNSUPPORTED_OPTION\n")); break; case OSSA_MPI_ENC_ERR_ID_TRANSFER_FAILURE: TI_DBG1(("ossaSetOperatorCB: OSSA_MPI_ENC_ERR_ID_TRANSFER_FAILURE\n")); break; default: TI_DBG1(("ossaGetOperatorCB: Unknown status 0x%x\n",status)); } ostiPortEvent(tiRoot, tiEncryptOperation, status, &encryptEventData); } GLOBAL void ossaDIFEncryptionOffloadStartCB( agsaRoot_t *agRoot, agsaContext_t *agContext, bit32 status, agsaOffloadDifDetails_t *agsaOffloadDifDetails) { TI_DBG1(("ossaDIFEncryptionOffloadStartCB: start\n")); TI_DBG1(("ossaDIFEncryptionOffloadStartCB: status 0x%x agsaOffloadDifDetails=%p\n", status, agsaOffloadDifDetails)); return; } GLOBAL bit32 ossaTimeStamp( agsaRoot_t *agRoot ) { tdsaRootOsData_t *osData= agNULL; tiRoot_t *tiRoot= agNULL; if(agRoot) { osData = (tdsaRootOsData_t *)agRoot->osData; } if(osData) { tiRoot = (tiRoot_t *)osData->tiRoot; } return(ostiTimeStamp(tiRoot)); } GLOBAL bit64 ossaTimeStamp64( agsaRoot_t *agRoot) { tdsaRootOsData_t *osData= agNULL; tiRoot_t *tiRoot= agNULL; if(agRoot) { osData = (tdsaRootOsData_t 
*)agRoot->osData; /* tail of a routine that begins before this chunk; left untouched */
  }
  if(osData)
  {
    tiRoot = (tiRoot_t *)osData->tiRoot;
  }
  return(ostiTimeStamp64(tiRoot));
}

#ifdef FDS_SM
/*
 * tdIDStartTimer
 *
 * Arms the per-device watchdog timer for an outstanding SATA Identify
 * Device Data request.  If a previous instance of the timer is still
 * running it is killed first, then the timer is (re)initialized with
 * tdIDStartTimerCB as the expiry callback and queued on the initiator
 * timer list.  Timer state is sampled under TD_TIMER_LOCK.
 */
osGLOBAL void
tdIDStartTimer(tiRoot_t *tiRoot,
               smIORequest_t *smIORequest,
               tdsaDeviceData_t *oneDeviceData
              )
{
  tdsaRoot_t    *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t    *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;

  TI_DBG1(("tdIDStartTimer: start\n"));
  /* Kill a still-running timer before rearming it. The lock only covers
     the timerRunning test; tdsaKillTimer is called outside the lock. */
  tdsaSingleThreadedEnter(tiRoot, TD_TIMER_LOCK);
  if (oneDeviceData->tdIDTimer.timerRunning == agTRUE)
  {
    tdsaSingleThreadedLeave(tiRoot, TD_TIMER_LOCK);
    tdsaKillTimer(
                  tiRoot,
                  &oneDeviceData->tdIDTimer
                 );
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_TIMER_LOCK);
  }
  /* Timeout is expressed in OS ticks: fixed usec budget / usecs-per-tick. */
  tdsaSetTimerRequest(
                      tiRoot,
                      &oneDeviceData->tdIDTimer,
                      SATA_ID_DEVICE_DATA_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                      tdIDStartTimerCB,
                      smIORequest,
                      oneDeviceData,
                      agNULL
                     );
  tdsaAddTimer(
               tiRoot,
               &Initiator->timerlist,
               &oneDeviceData->tdIDTimer
              );
  TI_DBG1(("tdIDStartTimer: end\n"));
  return;
}

/*
 * tdIDStartTimerCB
 *
 * Expiry callback for the Identify Device Data watchdog armed by
 * tdIDStartTimer().  timerData1 is the smIORequest_t of the stalled
 * request, timerData2 the owning tdsaDeviceData_t.  After validating
 * that the request is still pending, it aborts the stalled request via
 * smIOAbort(); the retry itself happens in the SM completion path.
 * The large #ifdef REMOVED region below is retired retry/hard-reset
 * logic kept for reference only.
 */
osGLOBAL void
tdIDStartTimerCB(
                  tiRoot_t * tiRoot,
                  void * timerData1,
                  void * timerData2,
                  void * timerData3
                )
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  smIORequest_t     *smIORequest;
  tdsaDeviceData_t  *oneDeviceData;
  smRoot_t          *smRoot;
  tdIORequestBody_t *tdIORequestBody;
  smDeviceHandle_t  *smDeviceHandle;
  tdsaPortContext_t *onePortContext;
#ifdef REMOVED
  agsaRoot_t        *agRoot;
  bit32             IDstatus;
//#endif
//#ifdef REMOVED
  agsaIORequest_t   *agAbortIORequest = agNULL;
  tdIORequestBody_t *tdAbortIORequestBody = agNULL;
  bit32             PhysUpper32;
  bit32             PhysLower32;
  bit32             memAllocStatus;
  void              *osMemHandle;
#endif // REMOVED
#ifdef TD_DEBUG_ENABLE
  /* NOTE(review): "status" exists only under TD_DEBUG_ENABLE, yet the final
     TI_DBG1 below prints it; presumably TI_DBG1 expands to nothing unless the
     debug build also defines TD_DEBUG_ENABLE -- TODO confirm. */
  bit32             status = AGSA_RC_FAILURE;
#endif

  TI_DBG1(("tdIDStartTimerCB start\n"));
  smIORequest = (smIORequest_t *)timerData1;
  oneDeviceData = (tdsaDeviceData_t *)timerData2;
  smRoot = &(tdsaAllShared->smRoot);
#ifdef REMOVED
  agRoot = oneDeviceData->agRoot;
#endif // REMOVED
  if (smIORequest == agNULL)
  {
    TI_DBG1(("tdIDStartTimerCB: smIORequest == agNULL !!!!!!\n"));
    return;
  }
  if (oneDeviceData == agNULL)
  {
    TI_DBG1(("tdIDStartTimerCB: oneDeviceData == agNULL !!!!!!\n"));
    return;
  }
  if (oneDeviceData->satDevData.IDPending == agFALSE ||
      oneDeviceData->satDevData.IDDeviceValid == agTRUE)
  {
    /*the Identify Device command already normally completed, just return*/
    return;
  }
  tdIORequestBody = (tdIORequestBody_t *)smIORequest->tdData;
  smDeviceHandle = (smDeviceHandle_t *)&(oneDeviceData->smDeviceHandle);
  onePortContext = oneDeviceData->tdPortContext;
  if (tdIORequestBody == agNULL)
  {
    TI_DBG1(("tdIDStartTimerCB: tdIORequestBody == agNULL !!!!!!\n"));
    return;
  }
  if (smDeviceHandle == agNULL)
  {
    TI_DBG1(("tdIDStartTimerCB: smDeviceHandle == agNULL !!!!!!\n"));
    return;
  }
  if (onePortContext == agNULL)
  {
    TI_DBG1(("tdIDStartTimerCB: onePortContext == agNULL !!!!!!\n"));
    return;
  }
  TI_DBG1(("tdIDStartTimerCB: did %d\n", oneDeviceData->id));
  /*
    1. smIOabort()
    2. in tdsmIDCompletedCB(), retry
  */
  if (oneDeviceData->valid == agFALSE)
  {
    TI_DBG1(("tdIDStartTimerCB: invalid device\n"));
    return;
  }
#ifdef TD_DEBUG_ENABLE
  status = smIOAbort( smRoot, smIORequest );
#else
  smIOAbort( smRoot, smIORequest );
#endif

#ifdef REMOVED
  /* Dead code: former in-place retry / SMP hard-reset escalation path.
     Note it references agIORequest, which is not declared here, so this
     region would not compile if re-enabled as-is. */
  /* allocating agIORequest for abort itself */
  memAllocStatus = ostiAllocMemory(
                                   tiRoot,
                                   &osMemHandle,
                                   (void **)&tdAbortIORequestBody,
                                   &PhysUpper32,
                                   &PhysLower32,
                                   8,
                                   sizeof(tdIORequestBody_t),
                                   agTRUE
                                   );
  if (memAllocStatus != tiSuccess)
  {
    /* let os process IO */
    TI_DBG1(("tdIDStartTimerCB: ostiAllocMemory failed...; can't retry ID data \n"));
    return;
  }
  if (tdAbortIORequestBody == agNULL)
  {
    /* let os process IO */
    TI_DBG1(("tdIDStartTimerCB: ostiAllocMemory returned NULL tdAbortIORequestBody; can't retry ID data\n"));
    return;
  }
  /* setup task management structure */
  tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
  /* setting callback but not used later */
  tdAbortIORequestBody->IOCompletionFunc = agNULL;
  //tdAbortIORequestBody->IOCompletionFunc = itdssIOAbortedHandler;
  tdAbortIORequestBody->tiDevHandle = (tiDeviceHandle_t *)&(oneDeviceData->tiDeviceHandle);
  /* initialize agIORequest */
  agAbortIORequest = &(tdAbortIORequestBody->agIORequest);
  agAbortIORequest->osData = (void *) tdAbortIORequestBody;
  agAbortIORequest->sdkData = agNULL; /* LL takes care of this */
//#endif
//#ifdef REMOVED
  status = saSATAAbort(agRoot, agAbortIORequest, 0, oneDeviceData->agDevHandle, 1, /* abort all */ agNULL, ossaSATAIDAbortCB );
  status = saSATAAbort(agRoot, agAbortIORequest, 0, oneDeviceData->agDevHandle, 0, /* abort one */ agIORequest, ossaSATAIDAbortCB );
//#endif
//#ifdef REMOVED
  if (status != AGSA_RC_SUCCESS)
  {
    TI_DBG1(("tdIDStartTimerCB: saSATAAbort failed; can't retry ID data\n"));
  }
  if (oneDeviceData->satDevData.IDDeviceValid == agTRUE)
  {
    TI_DBG1(("tdIDStartTimerCB: IDDeviceValid is valid, no need to retry\n"));
    return;
  }
  if (tdIORequestBody->reTries <= SM_RETRIES)
  {
    tdIORequestBody->tiIORequest = agNULL; /* not in use */
    tdIORequestBody->pid = onePortContext->id;
    smIORequest->tdData = tdIORequestBody;
    smIORequest->smData = &tdIORequestBody->smIORequestBody;
    smDeviceHandle->tdData = oneDeviceData;
    IDstatus = smIDStart(smRoot,
                         smIORequest,
                         smDeviceHandle
                        );
    if (IDstatus == SM_RC_SUCCESS)
    {
      TI_DBG1(("tdIDStartTimerCB: being retried!!!\n"));
      tdIORequestBody->reTries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      tdIDStartTimer(tiRoot, smIORequest, oneDeviceData);
    }
    else
    {
      /* identify device data is not valid */
      TI_DBG1(("tdIDStartTimerCB: smIDStart fail or busy %d!!!\n", IDstatus));
      tdIORequestBody->reTries = 0;
      tdIORequestBody->ioCompleted = agTRUE;
      tdIORequestBody->ioStarted = agFALSE;
      ostiFreeMemory(
                     tiRoot,
                     tdIORequestBody->osMemHandle,
                     sizeof(tdIORequestBody_t)
                    );
      oneDeviceData->satDevData.IDDeviceValid = agFALSE;
      smReportRemoval(tiRoot, agRoot, oneDeviceData, onePortContext);
      return;
    }
  }
  else
  {
    /* give up */
    TI_DBG1(("tdIDStartTimerCB: retries are over!!!\n"));
    if (oneDeviceData->tdIDTimer.timerRunning == agTRUE)
    {
      tdsaKillTimer(
                    tiRoot,
                    &oneDeviceData->tdIDTimer
                   );
    }
    tdIORequestBody->reTries = 0;
    tdIORequestBody->ioCompleted = agTRUE;
    tdIORequestBody->ioStarted = agFALSE;
    ostiFreeMemory(
                   tiRoot,
                   tdIORequestBody->osMemHandle,
                   sizeof(tdIORequestBody_t)
                  );
    oneDeviceData->satDevData.IDDeviceValid = agFALSE;
    if (oneDeviceData->SMNumOfID <= 0) /* does SMP HARD RESET only upto one time */
    {
      TI_DBG1(("tdIDStartTimerCB: fail; sending HARD_RESET\n"));
      oneDeviceData->SMNumOfID++;
      if (oneDeviceData->directlyAttached == agTRUE)
      {
        saLocalPhyControl(agRoot, agNULL, 0, oneDeviceData->phyID, AGSA_PHY_HARD_RESET, agNULL);
      }
      else
      {
        tdsaPhyControlSend(tiRoot, oneDeviceData, SMP_PHY_CONTROL_HARD_RESET, agNULL);
      }
    }
    else
    {
      /* given up after one time of SMP HARD RESET; */
      TI_DBG1(("tdIDStartTimerCB: fail; but giving up sending HARD_RESET!!!\n"));
      if (oneDeviceData->directlyAttached == agTRUE)
      {
        smReportRemovalDirect(tiRoot, agRoot, oneDeviceData);
      }
      else
      {
        smReportRemoval(tiRoot, agRoot, oneDeviceData, onePortContext);
      }
    }
  }
#endif // REMOVED
  TI_DBG1(("tdIDStartTimerCB: end, smIOAbort status %d\n", status));
  return;
}
#endif // FDS_SM

#if defined(FDS_DM) && defined(FDS_SM)
//start here
/*
 * tdIDStart
 *
 * Kicks off an Identify Device Data request through the SAT module (SM)
 * for a SATA/STP device whose identify data is not yet valid and not
 * already pending.  On success the device is enqueued on the main device
 * list (directly-attached case), IDPending is set and the watchdog timer
 * is armed via tdIDStartTimer().  On any failure the device is marked
 * invalid and, for directly-attached devices, link-up/discovery-ready
 * port events are still raised so the OS can proceed.
 */
GLOBAL void
tdIDStart(
           tiRoot_t             *tiRoot,
           agsaRoot_t           *agRoot,
           smRoot_t             *smRoot,
           tdsaDeviceData_t     *oneDeviceData,
           tdsaPortContext_t    *onePortContext
         )
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  bit32             SMstatus = SM_RC_FAILURE;
  tdIORequestBody_t *tdIORequestBody;
  smIORequest_t     *smIORequest;
  smDeviceHandle_t  *smDeviceHandle;
  bit32             PhysUpper32;
  bit32             PhysLower32;
  bit32             memAllocStatus;
  void              *osMemHandle;

  TI_DBG1(("tdIDStart: start, did %d\n",oneDeviceData->id));
  if ( (DEVICE_IS_SATA_DEVICE(oneDeviceData)|| DEVICE_IS_STP_TARGET(oneDeviceData))
        &&
        oneDeviceData->satDevData.IDDeviceValid == agFALSE
        &&
        oneDeviceData->satDevData.IDPending == agFALSE
     )
  {
    TI_DBG2(("tdIDStart: in loop, did %d\n", oneDeviceData->id));
    /* allocating tdIORequestBody */
    memAllocStatus = ostiAllocMemory(
                                     tiRoot,
                                     &osMemHandle,
                                     (void **)&tdIORequestBody,
                                     &PhysUpper32,
                                     &PhysLower32,
                                     8,
                                     sizeof(tdIORequestBody_t),
                                     agTRUE
                                     );
    if (memAllocStatus != tiSuccess || tdIORequestBody == agNULL)
    {
      TI_DBG1(("tdIDStart: ostiAllocMemory failed... or ostiAllocMemory returned NULL tdIORequestBody!!!\n"));
      oneDeviceData->satDevData.IDDeviceValid = agFALSE;
      if (oneDeviceData->directlyAttached == agTRUE)
      {
        /* notifying link up */
        ostiPortEvent(
                      tiRoot,
                      tiPortLinkUp,
                      tiSuccess,
                      (void *)onePortContext->tiPortalContext
                     );
#ifdef INITIATOR_DRIVER
        /* triggers discovery */
        ostiPortEvent(
                      tiRoot,
                      tiPortDiscoveryReady,
                      tiSuccess,
                      (void *) onePortContext->tiPortalContext
                     );
#endif
      }
    }
    else
    {
      /* initialize */
      osti_memset(tdIORequestBody, 0, sizeof(tdIORequestBody_t));
      tdIORequestBody->osMemHandle = osMemHandle;
      TI_DBG2(("tdIDStart: tdIORequestBody %p tdIORequestBody->osMemHandle %p\n", tdIORequestBody, tdIORequestBody->osMemHandle));
      /* not in use */
      tdIORequestBody->IOCompletionFunc = agNULL;
      tdIORequestBody->tiDevHandle = agNULL;
      tdIORequestBody->tiIORequest = agNULL; /* not in use */
      tdIORequestBody->pid = onePortContext->id;
      tdIORequestBody->reTries = 0;
      smIORequest = (smIORequest_t *)&(tdIORequestBody->smIORequest);
      smIORequest->tdData = tdIORequestBody;
      smIORequest->smData = &tdIORequestBody->smIORequestBody;
      smDeviceHandle = (smDeviceHandle_t *)&(oneDeviceData->smDeviceHandle);
      smDeviceHandle->tdData = oneDeviceData;
      TI_DBG2(("tdIDStart: smIORequest %p\n", smIORequest));
      SMstatus = smIDStart(smRoot,
                           smIORequest,
                           &(oneDeviceData->smDeviceHandle)
                          );
      if (SMstatus == SM_RC_SUCCESS)
      {
        if (oneDeviceData->directlyAttached == agTRUE)
        {
          TI_DBG2(("tdIDStart: successfully sent identify device data\n"));
          /* Add the devicedata to the mainlink */
          tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
          TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->MainLink), &(tdsaAllShared->MainDeviceList));
          tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
          TI_DBG6(("tdIDStart: one case did %d \n", oneDeviceData->id));
        }
        oneDeviceData->satDevData.IDPending = agTRUE;
        /* start a timer */
        tdIDStartTimer(tiRoot, smIORequest, oneDeviceData);
      }
      else
      {
        /* failed to send */
        TI_DBG1(("tdIDStart: smIDStart fail or busy %d\n", SMstatus));
        /* free up allocated memory */
        /* NOTE(review): the handle was saved in tdIORequestBody->osMemHandle
           above; freeing via IOType.InitiatorTMIO.osMemHandle assumes the two
           fields alias (e.g. share storage in a union) -- TODO confirm. */
        ostiFreeMemory(
                       tiRoot,
                       tdIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                       sizeof(tdIORequestBody_t)
                      );
        oneDeviceData->satDevData.IDDeviceValid = agFALSE;
        if (oneDeviceData->directlyAttached == agTRUE)
        {
          TI_DBG1(("tdIDStart: failed in sending identify device data\n"));
          /* put onedevicedata back to free list */
          tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
          TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));
          tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
          /* notifying link up */
          ostiPortEvent(
                        tiRoot,
                        tiPortLinkUp,
                        tiSuccess,
                        (void *)onePortContext->tiPortalContext
                       );
#ifdef INITIATOR_DRIVER
          /* triggers discovery */
          ostiPortEvent(
                        tiRoot,
                        tiPortDiscoveryReady,
                        tiSuccess,
                        (void *) onePortContext->tiPortalContext
                       );
#endif
        }
        else
        {
          smReportRemoval(tiRoot, agRoot, oneDeviceData, onePortContext);
        }
      }
    }
  }
  TI_DBG1(("tdIDStart: exit\n"));
  return;
}
#endif

#ifdef SALLSDK_OS_IOMB_LOG_ENABLE
/* No-op IOMB logging hook; enabled builds may provide a real logger. */
GLOBAL void ossaLogIomb(agsaRoot_t *agRoot,
                        bit32 queueNum,
                        agBOOLEAN isInbound,
                        void *pMsg,
                        bit32 msgLength)
{
  return;
}
#endif /* SALLSDK_OS_IOMB_LOG_ENABLE */

#ifndef SATA_ENABLE
/*
 * These callback routines are defined in ossasat.c which are included in the
 * compilation if SATA_ENABLE is defined; the stubs below satisfy the linker
 * for non-SATA builds.
 */

/*****************************************************************************
*! \brief ossaDiscoverSataCB
*
*  Purpose: This function is called by lower layer to inform TD layer of
*           STP/SATA discovery results
*
*
*  \param   agRoot         Pointer to chip/driver Instance.
*  \param   agPortContext  Pointer to the port context of TD and Lower layer
*  \param   event          event type
*  \param   pParm1         Pointer to data associated with event
*  \param   pParm2         Pointer to data associated with event
*
*  \return: none
*
*  \note - For details, refer to SAS/SATA Low-Level API Specification
*
*****************************************************************************/
/* Stub for non-SATA builds; real implementation lives in ossasat.c. */
osGLOBAL void ossaDiscoverSataCB( agsaRoot_t         *agRoot,
                                  agsaPortContext_t  *agPortContext,
                                  bit32              event,
                                  void               *pParm1,
                                  void               *pParm2
                                )
{
  return;
}

/*****************************************************************************
*! \brief ossaSATACompleted
*
*  This routine is called to complete a SATA request previously issued to the
*  LL Layer in saSATAStart()
*
*  \param agRoot:      Handles for this instance of SAS/SATA hardware
*  \param agIORequest: Pointer to the LL I/O request context for this I/O.
*  \param agIOStatus:  Status of completed I/O.
*  \param agFirstDword:Pointer to the four bytes of FIS.
*  \param agIOInfoLen: Length in bytes of overrun/underrun residual or FIS
*                      length.
*  \param agParam:     Additional info based on status.
*
*  \return: none
*
*****************************************************************************/
/* Stub for non-SATA builds; real implementation lives in ossasat.c. */
GLOBAL void ossaSATACompleted(
                              agsaRoot_t        *agRoot,
                              agsaIORequest_t   *agIORequest,
                              bit32             agIOStatus,
                              void              *agFirstDword,
                              bit32             agIOInfoLen,
                              void              *agParam
                              )
{
  return;
}

/*****************************************************************************
*! \brief ossaSATAEvent
*
*  This routine is called to notify the OS Layer of an event associated with
*  SATA port or SATA device
*
*  \param agRoot:      Handles for this instance of SAS/SATA hardware
*  \param agIORequest: Pointer to the LL I/O request context for this I/O.
*  \param agPortContext Pointer to the port context of TD and Lower layer
*  \param agDevHandle:  Pointer to a device handle
*  \param event:        event type
*
*  \return: none
*
*****************************************************************************/
/* Stub for non-SATA builds; real implementation lives in ossasat.c. */
osGLOBAL void ossaSATAEvent(
                        agsaRoot_t              *agRoot,
                        agsaIORequest_t         *agIORequest,
                        agsaPortContext_t       *agPortContext,
                        agsaDevHandle_t         *agDevHandle,
                        bit32                   event,
                        bit32                   agIOInfoLen,
                        void                    *agParam
                        )
{
  return;
}

/*****************************************************************************
*! \brief ossaSATADeviceResetCB
*
*  This routine is called to complete a SATA device reset request previously
*  issued to the LL Layer in saSATADeviceReset().
*
*  \param agRoot:      Handles for this instance of SAS/SATA hardware
*  \param agDevHandle: Pointer to a device handle
*  \param resetStatus: Reset status:
*                      OSSA_SUCCESS: The reset operation completed successfully.
*                      OSSA_FAILURE: The reset operation failed.
*  \param resetparm:   Pointer to the Device-To-Host FIS received from the device.
*
*  \return: none
*
*****************************************************************************/
/* Stub for non-SATA builds; real implementation lives in ossasat.c. */
osGLOBAL void ossaSATADeviceResetCB(
                      agsaRoot_t        *agRoot,
                      agsaDevHandle_t   *agDevHandle,
                      bit32             resetStatus,
                      void              *resetparm)
{
  return;
}

/*****************************************************************************
*! \brief ossaDiscoverSasCB
*
*  Purpose: This function is called by lower layer to inform TD layer of
*           SAS discovery results
*
*
*  \param   agRoot         Pointer to chip/driver Instance.
*  \param   agPortContext  Pointer to the port context of TD and Lower layer
*  \param   event          event type
*  \param   pParm1         Pointer to data associated with event
*  \param   pParm2         Pointer to data associated with event
*
*  \return: none
*
*  \note - For details, refer to SAS/SATA Low-Level API Specification
*
*****************************************************************************/
/* Stub for non-SATA builds; real implementation lives in ossasat.c. */
osGLOBAL void ossaDiscoverSasCB(agsaRoot_t        *agRoot,
                                agsaPortContext_t *agPortContext,
                                bit32             event,
                                void              *pParm1,
                                void              *pParm2
                               )
{
  return;
}
#endif
Index: head/sys/dev/pms/RefTisa/tisa/sassata/common/tdmisc.c
===================================================================
--- head/sys/dev/pms/RefTisa/tisa/sassata/common/tdmisc.c (revision 359440)
+++ head/sys/dev/pms/RefTisa/tisa/sassata/common/tdmisc.c (revision 359441)
@@ -1,2901 +1,2901 @@
/*******************************************************************************
*Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved.
*
*Redistribution and use in source and binary forms, with or without modification, are permitted provided
*that the following conditions are met:
*1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
*following disclaimer.
*2. Redistributions in binary form must reproduce the above copyright notice,
*this list of conditions and the following disclaimer in the documentation and/or other materials provided
*with the distribution.
*
*THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
*WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
*FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ********************************************************************************/ /*******************************************************************************/ /** \file * * * This file contains TB misc. functions * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef FDS_SM #include #include #include #endif #ifdef FDS_DM #include #include #include #endif #include #include #include #ifdef INITIATOR_DRIVER #include #include #include #endif #ifdef TARGET_DRIVER #include #include #include #endif #include #include /***************************************************************************** *! \brief tiINIIOAbort * * Purpose: This function is called to abort an I/O request previously started * by a call to tiINIIOStart() or tiINIIOStartDif() . * * \param tiRoot: Pointer to initiator driver/port instance. * \param taskTag: Pointer to the associated task to be aborted * * \return: * * tiSuccess: I/O request successfully initiated. * tiBusy: No resources available, try again later. * tiIONoDevice: Invalid device handle. * tiError: Other errors that prevent the I/O request to be * started. 
* *****************************************************************************/ #ifdef INITIATOR_DRIVER /*TBD: INITIATOR SPECIFIC API in tiapi.h (TP)*/ osGLOBAL bit32 tiINIIOAbort( tiRoot_t *tiRoot, tiIORequest_t *taskTag ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = agNULL; tdIORequestBody_t *tdIORequestBody = agNULL; agsaIORequest_t *agIORequest = agNULL; bit32 sasStatus = AGSA_RC_FAILURE; tdsaDeviceData_t *oneDeviceData; bit32 status= tiError; agsaIORequest_t *agAbortIORequest; tdIORequestBody_t *tdAbortIORequestBody; bit32 PhysUpper32; bit32 PhysLower32; bit32 memAllocStatus; void *osMemHandle; agsaDevHandle_t *agDevHandle = agNULL; #ifdef FDS_SM smRoot_t *smRoot; tdIORequestBody_t *ToBeAbortedtdIORequestBody; smIORequest_t *ToBeAborted = agNULL; #endif TI_DBG2(("tiINIIOAbort: start\n")); if(taskTag == agNULL) { TI_DBG1(("tiINIIOAbort: taskTag is NULL\n")); return tiError; } agRoot = &(tdsaAllShared->agRootNonInt); tdIORequestBody = (tdIORequestBody_t *)taskTag->tdData; agIORequest = &(tdIORequestBody->agIORequest); oneDeviceData = tdIORequestBody->tiDevHandle->tdData; if(oneDeviceData == agNULL) { TI_DBG1(("tiINIIOAbort: DeviceData is NULL\n")); return tiSuccess; } agDevHandle = oneDeviceData->agDevHandle; TI_DBG2(("tiINIIOAbort: did %d\n", oneDeviceData->id)); /* for hotplug */ if (oneDeviceData->valid != agTRUE || oneDeviceData->registered != agTRUE || oneDeviceData->tdPortContext == agNULL ) { TI_DBG1(("tiINIIOAbort: NO Device did %d\n", oneDeviceData->id )); TI_DBG1(("tiINIIOAbort: device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG1(("tiINIIOAbort: device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); return tiError; } /* allocating agIORequest for abort itself */ memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&tdAbortIORequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdIORequestBody_t), 
agTRUE ); if (memAllocStatus != tiSuccess) { /* let os process IO */ TI_DBG1(("tiINIIOAbort: ostiAllocMemory failed...\n")); return tiError; } if (tdAbortIORequestBody == agNULL) { /* let os process IO */ TI_DBG1(("tiINIIOAbort: ostiAllocMemory returned NULL tdAbortIORequestBody\n")); return tiError; } /* setup task management structure */ tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle; /* setting callback */ tdAbortIORequestBody->IOCompletionFunc = itdssIOAbortedHandler; tdAbortIORequestBody->tiDevHandle = tdIORequestBody->tiDevHandle; /* initialize agIORequest */ agAbortIORequest = &(tdAbortIORequestBody->agIORequest); agAbortIORequest->osData = (void *) tdAbortIORequestBody; agAbortIORequest->sdkData = agNULL; /* LL takes care of this */ /* remember IO to be aborted */ tdAbortIORequestBody->tiIOToBeAbortedRequest = taskTag; if (oneDeviceData->DeviceType == TD_SAS_DEVICE) { sasStatus = saSSPAbort(agRoot, agAbortIORequest, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, 0/* flag */, agIORequest, agNULL); if (sasStatus == AGSA_RC_SUCCESS) { return tiSuccess; } else { return tiError; } } else if (oneDeviceData->DeviceType == TD_SATA_DEVICE) { TI_DBG2(("tiINIIOAbort: calling satIOAbort() oneDeviceData=%p\n", oneDeviceData)); #ifdef FDS_SM smRoot = &(tdsaAllShared->smRoot); if ( taskTag != agNULL) { ToBeAbortedtdIORequestBody = (tdIORequestBody_t *)taskTag->tdData; ToBeAborted = &(ToBeAbortedtdIORequestBody->smIORequest); status = smIOAbort(smRoot, ToBeAborted); return status; } else { TI_DBG1(("tiINIIOAbort: taskTag is NULL!!!\n")); return tiError; } #else #ifdef SATA_ENABLE status = satIOAbort(tiRoot, taskTag ); #endif return status; #endif /* else FDS_SM */ } else { return tiError; } } osGLOBAL bit32 tiINIIOAbortAll( tiRoot_t *tiRoot, tiDeviceHandle_t *tiDeviceHandle ) { agsaRoot_t *agRoot = agNULL; tdsaDeviceData_t *oneDeviceData = agNULL; bit32 status = tiError; #ifdef FDS_SM tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; 
tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; smRoot_t *smRoot = &(tdsaAllShared->smRoot); smDeviceHandle_t *smDeviceHandle; #endif TI_DBG1(("tiINIIOAbortAll: start\n")); if (tiDeviceHandle == agNULL) { TI_DBG1(("tiINIIOAbortAll: tiDeviceHandle is NULL!!!\n")); return tiError; } oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; if (oneDeviceData == agNULL) { TI_DBG1(("tiINIIOAbortAll: oneDeviceData is NULL!!!\n")); return tiError; } /* for hotplug */ if (oneDeviceData->valid != agTRUE || oneDeviceData->registered != agTRUE || oneDeviceData->tdPortContext == agNULL ) { TI_DBG1(("tiINIIOAbortAll: NO Device did %d\n", oneDeviceData->id )); TI_DBG1(("tiINIIOAbortAll: device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG1(("tiINIIOAbortAll: device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); return tiError; } agRoot = oneDeviceData->agRoot; if (agRoot == agNULL) { TI_DBG1(("tiINIIOAbortAll: agRoot is NULL!!!\n")); return tiError; } /* this is processed in ossaSSPAbortCB, ossaSATAAbortCB, ossaSMPAbortCB */ if (oneDeviceData->OSAbortAll == agTRUE) { TI_DBG1(("tiINIIOAbortAll: already pending!!!\n")); return tiBusy; } else { oneDeviceData->OSAbortAll = agTRUE; } #ifdef FDS_SM if ( DEVICE_IS_SSP_TARGET(oneDeviceData) || DEVICE_IS_SMP_TARGET(oneDeviceData)) { status = tdsaAbortAll(tiRoot, agRoot, oneDeviceData); } else if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData) ) { TI_DBG2(("tiINIIOAbortAll: calling smIOAbortAll\n")); smDeviceHandle = (smDeviceHandle_t *)&(oneDeviceData->smDeviceHandle); smDeviceHandle->tdData = oneDeviceData; status = smIOAbortAll(smRoot, smDeviceHandle); } else { TI_DBG1(("tiINIIOAbortAll: unknow device type!!! 
0x%x\n", oneDeviceData->target_ssp_stp_smp)); status = AGSA_RC_FAILURE; } #else status = tdsaAbortAll(tiRoot, agRoot, oneDeviceData); #endif return status; } #endif /* INITIATOR_DRIVER */ /***************************************************************************** *! \brief tdsaAbortAll * * Purpose: This function is called to abort an all pending I/O request on a * device * * \param tiRoot: Pointer to initiator driver/port instance. * \param agRoot: Pointer to chip/driver Instance. * \param oneDeviceData: Pointer to the device * * \return: * * None * *****************************************************************************/ osGLOBAL bit32 tdsaAbortAll( tiRoot_t *tiRoot, agsaRoot_t *agRoot, tdsaDeviceData_t *oneDeviceData ) { agsaIORequest_t *agAbortIORequest = agNULL; tdIORequestBody_t *tdAbortIORequestBody = agNULL; bit32 PhysUpper32; bit32 PhysLower32; bit32 memAllocStatus; void *osMemHandle; bit32 status = AGSA_RC_FAILURE; TI_DBG1(("tdsaAbortAll: did %d\n", oneDeviceData->id)); /* allocating agIORequest for abort itself */ memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&tdAbortIORequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdIORequestBody_t), agTRUE ); if (memAllocStatus != tiSuccess) { /* let os process IO */ TI_DBG1(("tdsaAbortAll: ostiAllocMemory failed...\n")); return tiError; } if (tdAbortIORequestBody == agNULL) { /* let os process IO */ TI_DBG1(("tdsaAbortAll: ostiAllocMemory returned NULL tdAbortIORequestBody\n")); return tiError; } /* setup task management structure */ tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle; /* setting callback but not used later */ tdAbortIORequestBody->IOCompletionFunc = agNULL; //tdAbortIORequestBody->IOCompletionFunc = itdssIOAbortedHandler; tdAbortIORequestBody->tiDevHandle = (tiDeviceHandle_t *)&(oneDeviceData->tiDeviceHandle); /* initialize agIORequest */ agAbortIORequest = &(tdAbortIORequestBody->agIORequest); agAbortIORequest->osData = (void *) tdAbortIORequestBody; 
agAbortIORequest->sdkData = agNULL; /* LL takes care of this */ if ( DEVICE_IS_SSP_TARGET(oneDeviceData)) { /* SSPAbort */ status = saSSPAbort(agRoot, agAbortIORequest, tdsaRotateQnumber(tiRoot, oneDeviceData), //0, oneDeviceData->agDevHandle, 1, /* abort all */ agNULL, agNULL ); } else if (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData) ) { /* SATAAbort*/ if (oneDeviceData->satDevData.IDDeviceValid == agFALSE) { TI_DBG2(("tdsaAbortAll: saSATAAbort\n")); status = saSATAAbort(agRoot, agAbortIORequest, 0, oneDeviceData->agDevHandle, 1, /* abort all */ agNULL, agNULL ); } else { TI_DBG2(("tdsaAbortAll: saSATAAbort IDDeviceValid\n")); status = saSATAAbort(agRoot, agAbortIORequest, tdsaRotateQnumber(tiRoot, oneDeviceData), //0, oneDeviceData->agDevHandle, 1, /* abort all */ agNULL, agNULL ); } } else if (DEVICE_IS_SMP_TARGET(oneDeviceData)) { /* SMPAbort*/ TI_DBG2(("tdsaAbortAll: saSMPAbort \n")); status = saSMPAbort(agRoot, agAbortIORequest, tdsaRotateQnumber(tiRoot, oneDeviceData), //0, oneDeviceData->agDevHandle, 1, /* abort all */ agNULL, agNULL ); } else { TI_DBG1(("tdsaAbortAll: unknown device type!!! 0x%x\n", oneDeviceData->target_ssp_stp_smp)); status = AGSA_RC_FAILURE; } if (status == AGSA_RC_SUCCESS) { return tiSuccess; } else { TI_DBG1(("tdsaAbortAll: failed status=%d\n", status)); //failed to send abort command, we need to free the memory ostiFreeMemory( tiRoot, tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); return tiError; } } /***************************************************************************** *! \brief tiCOMReset * * Purpose: This function is called to trigger soft or hard reset * * \param tiRoot: Pointer to initiator driver/port instance. 
*  \param  option: Options
*
*  \return:
*
*          None
*
*****************************************************************************/
osGLOBAL void
tiCOMReset(
           tiRoot_t    *tiRoot,
           bit32       option
           )
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t        *agRoot = agNULL;
#ifdef TI_GETFOR_ONRESET
  agsaControllerStatus_t controllerStatus;
  agsaForensicData_t     forensicData;
  /* NOTE(review): "once" is set here and never cleared, so it does not
     actually limit the getmoreData loop below -- TODO confirm intent. */
  bit32 once = 1;
  bit32  status;
#endif /* TI_GETFOR_ONRESET */

  TI_DBG1(("tiCOMReset: start option 0x%x\n",option));
  tdsaAllShared->resetCount++;
  TI_DBG2(("tiCOMReset: reset count %d\n", tdsaAllShared->resetCount));

  agRoot = &(tdsaAllShared->agRootNonInt);

  if (tdsaAllShared->flags.resetInProgress == agTRUE)
  {
    TI_DBG1(("tiCOMReset : Reset is already in progress : \n"));
    /* don't do anything : just return */
    return;
  }

  /* resetInProgress is left set on return; presumably it is cleared when
     the reset completes elsewhere -- TODO confirm. */
  tdsaAllShared->flags.resetInProgress = agTRUE;

#ifdef TI_GETFOR_ONRESET
  /* Before resetting, pull the fatal-error forensic dump from the chip
     (in directLen-sized chunks) if a fatal error is latched. */
  saGetControllerStatus(agRoot, &controllerStatus);
  if(controllerStatus.fatalErrorInfo.errorInfo1)
  {
    bit8 * DirectData = (bit8 * )tdsaAllShared->FatalErrorData;
    forensicData.DataType = TYPE_FATAL;
    forensicData.dataBuf.directLen =  (8 * 1024);
    forensicData.dataBuf.directOffset = 0; /* current offset */
    forensicData.dataBuf.readLen = 0;   /* Data read */
    getmoreData:
    forensicData.dataBuf.directData = DirectData;
    status = saGetForensicData( agRoot, agNULL, &forensicData);
    TI_DBG1(("tiCOMReset:status %d readLen 0x%x directLen 0x%x directOffset 0x%x\n",
      status, forensicData.dataBuf.readLen, forensicData.dataBuf.directLen, forensicData.dataBuf.directOffset));
    /* a full chunk means more data may remain; advance and read again */
    if( forensicData.dataBuf.readLen == forensicData.dataBuf.directLen && !status && once)
    {
      DirectData += forensicData.dataBuf.readLen;
      goto getmoreData;
    }
    TI_DBG1(("tiCOMReset:saGetForensicData type %d read 0x%x bytes\n",  forensicData.DataType,  forensicData.dataBuf.directOffset ));
  }
#endif /* TI_GETFOR_ONRESET */

  if (option == tiSoftReset)
  {
    /* soft reset */
    TI_DBG6(("tiCOMReset: soft reset\n"));
    saHwReset(agRoot, AGSA_SOFT_RESET, 0);
    return;
  }
  else
  {
    /* NOTE(review): the non-soft option also issues AGSA_SOFT_RESET; the
       real chip reset is still under #ifdef NOT_YET. */
    saHwReset(agRoot, AGSA_SOFT_RESET, 0);
#ifdef NOT_YET
    /* hard reset */
    saHwReset(agRoot, AGSA_CHIP_RESET, 0);
#endif
  }
  return;
}

/*****************************************************************************/
/*! \biref tiINIReportErrorToEventLog
 *
 *  Purpose: This function is called to report errors that needs to be logged
 *           into event log.
 *
 *  \param tiRoot:      Pointer to initiator specific root data structure for
 *                      this instance of the driver.
 *  \param agEventData: Event data structure.
 *
 *  \return None.
 *
 */
/*****************************************************************************/
#ifdef INITIATOR_DRIVER
/* Currently unimplemented: always reports failure (tiError). */
osGLOBAL bit32
tiINIReportErrorToEventLog(
                           tiRoot_t            *tiRoot,
                           tiEVTData_t         *agEventData
                           )
{
  TI_DBG6(("tiINIReportErrorToEventLog: start\n"));
  return tiError;
}
#endif /* INITIATOR_DRIVER */

/*****************************************************************************/
/*! \brief  ossaReenableInterrupts
 *
 *
 *  Purpose: This routine is called to enable interrupt
 *
 *
 *  \param  agRoot:             Pointer to chip/driver Instance.
 *  \param  outboundChannelNum: Zero-base channel number
 *
 *
 *  \return None.
 *
 *  \note - The scope is shared target and initiator.
 *
 */
/*****************************************************************************/
#ifndef ossaReenableInterrupts
/* Default implementation: forwards to the OS layer's interrupt enable. */
osGLOBAL void
ossaReenableInterrupts(
                       agsaRoot_t  *agRoot,
                       bit32       outboundChannelNum
                       )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData);
  ostiInterruptEnable(
                      osData->tiRoot,
                      outboundChannelNum
                      );
  return;
}
#endif

/*
  1. initiator
     send task management
     call saSSPAbort()

  2. Target
     call saSSPAbort()
*/
/*****************************************************************************
*! \brief tiINITaskManagement
*
* Purpose:  This routine is called to explicitly ask the Transport Dependent
*           Layer to issue a Task Management command to a device.
* * \param tiRoot: Pointer to driver instance * \param tiDeviveHandle: Pointer to the device handle for this session. * \param task: SAM-2 task management request. * \param lun: Pointer to the SCSI-3 LUN information * when applicable. Set to zero when not applicable. * \param taskTag: Pointer to the associated task where the task * management command is to be applied. Set to agNULL * if not applicable for the specific Task Management * task. * \param currentTaskTag: The current context or task tag for this task. This * task tag will be passed back in ostiInitiatorEvent() * when this task management is completed. * * \return: * tiSuccess TM request successfully initiated. * tiBusy No resources available, try again later. * tiIONoDevice Invalid device handle. * tiError Other errors that prevent the TM request to be started. * *****************************************************************************/ /* warm reset->smp phy control(hard reset) or saLocalPhyControl(AGSA_PHY_HARD_RESET) */ #ifdef INITIATOR_DRIVER osGLOBAL bit32 tiINITaskManagement ( tiRoot_t *tiRoot, tiDeviceHandle_t *tiDeviceHandle, bit32 task, tiLUN_t *lun, tiIORequest_t *taskTag, /* being aborted one */ tiIORequest_t *currentTaskTag /* task management itself */ ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; itdsaIni_t *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni; agsaRoot_t *agRoot = agNULL; bit32 tiStatus = tiError; bit32 notImplemented = agFALSE; tdsaDeviceData_t *oneDeviceData = agNULL; void *osMemHandle; tdIORequestBody_t *TMtdIORequestBody; bit32 PhysUpper32; bit32 PhysLower32; bit32 memAllocStatus; bit32 agRequestType; agsaIORequest_t *agIORequest = agNULL; /* task management itself */ agsaIORequest_t *agTMRequest = agNULL; /* IO being task managed */ agsaDevHandle_t *agDevHandle = agNULL; agsaSASRequestBody_t *agSASRequestBody = agNULL; agsaSSPScsiTaskMgntReq_t *agSSPTaskMgntRequest; bit32 saStatus; 
tdIORequestBody_t *tdIORequestBody; #ifdef FDS_SM smRoot_t *smRoot; smDeviceHandle_t *smDeviceHandle; smIORequest_t *ToBeAborted = agNULL; smIORequest_t *TaskManagement; tdIORequestBody_t *ToBeAbortedtdIORequestBody; tdIORequestBody_t *SMTMtdIORequestBody; void *SMosMemHandle; bit32 SMPhysUpper32; bit32 SMPhysLower32; bit32 SMmemAllocStatus; #endif TI_DBG2(("tiINITaskManagement: start\n")); /* just for testing only */ #ifdef REMOVED //start temp if(tiDeviceHandle == agNULL) { TI_DBG1(("tiINITaskManagement: tiDeviceHandle is NULL\n")); return tiError; } oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; if(oneDeviceData == agNULL) { TI_DBG1(("tiINITaskManagement: tiDeviceHandle=%p DeviceData is NULL\n", tiDeviceHandle)); return tiError; } TI_DBG1(("tiINITaskManagement: did %d\n", oneDeviceData->id )); return tiError; //end temp // just for testing if (task == AG_LOGICAL_UNIT_RESET) { TI_DBG1(("tiINITaskManagement: failing LUN RESET for testing\n")); return tiError; } #endif switch(task) { case AG_ABORT_TASK: TI_DBG6(("tiINITaskManagement: ABORT_TASK\n")); break; case AG_ABORT_TASK_SET: TI_DBG6(("tiINITaskManagement: ABORT_TASK_SET\n")); break; case AG_CLEAR_ACA: TI_DBG6(("tiINITaskManagement: CLEAR_ACA\n")); break; case AG_CLEAR_TASK_SET: TI_DBG6(("tiINITaskManagement: CLEAR_TASK_SET\n")); break; case AG_LOGICAL_UNIT_RESET: TI_DBG6(("tiINITaskManagement: LOGICAL_UNIT_RESET\n")); break; case AG_TARGET_WARM_RESET: TI_DBG6(("tiINITaskManagement: TARGET_WARM_RESET\n")); break; case AG_QUERY_TASK: TI_DBG6(("tiINITaskManagement: QUERY_TASK\n")); break; default: TI_DBG1(("tiINITaskManagement: notImplemented 0x%0x !!!\n",task)); notImplemented = agTRUE; break; } if (notImplemented) { TI_DBG1(("tiINITaskManagement: not implemented 0x%0x !!!\n",task)); return tiStatus; } if(tiDeviceHandle == agNULL) { TI_DBG1(("tiINITaskManagement: tiDeviceHandle is NULL\n")); return tiError; } oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; if(oneDeviceData == agNULL) { 
TI_DBG1(("tiINITaskManagement: tiDeviceHandle=%p DeviceData is NULL\n", tiDeviceHandle)); return tiIONoDevice; } /* for hotplug */ if (oneDeviceData->valid != agTRUE || oneDeviceData->registered != agTRUE || oneDeviceData->tdPortContext == agNULL ) { TI_DBG1(("tiINITaskManagement: NO Device did %d Addr 0x%08x:0x%08x\n", oneDeviceData->id , oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); return tiIONoDevice; } /* 1. call tiINIOAbort() 2. call tdssTaskXmit() */ if (oneDeviceData->DeviceType == TD_SAS_DEVICE) { agRoot = oneDeviceData->agRoot; agDevHandle = oneDeviceData->agDevHandle; TI_DBG1(("tiINITaskManagement: SAS Device\n")); /* WARM_RESET is experimental code. Needs more testing and debugging */ if (task == AG_TARGET_WARM_RESET) { agsaContext_t *agContext; tdsaDeviceData_t *tdsaDeviceData; tdsaDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; currentTaskTag->tdData = tdsaDeviceData; agContext = &(tdsaDeviceData->agDeviceResetContext); agContext->osData = currentTaskTag; TI_DBG2(("tiINITaskManagement: did %d device reset for SAS\n", oneDeviceData->id)); saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, SA_DS_IN_RECOVERY); /* warm reset by saLocalPhyControl or SMP PHY control */ if (oneDeviceData->directlyAttached == agTRUE) { TI_DBG2(("tiINITaskManagement: device reset directly attached\n")); saLocalPhyControl(agRoot, agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->phyID, AGSA_PHY_HARD_RESET, agNULL ); return tiSuccess; } else { TI_DBG2(("tiINITaskManagement: device reset expander attached\n")); saStatus = tdsaPhyControlSend(tiRoot, oneDeviceData, SMP_PHY_CONTROL_HARD_RESET, currentTaskTag, tdsaRotateQnumber(tiRoot, oneDeviceData) ); return saStatus; } } else { /* task management */ TI_DBG6(("tiINITaskManagement: making task management frame \n")); /* 1. create task management frame 2. 
sends it using "saSSPStart()" */ /* Allocate memory for task management */ memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&TMtdIORequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdIORequestBody_t), agTRUE ); if (memAllocStatus != tiSuccess) { TI_DBG1(("tiINITaskManagement: ostiAllocMemory failed...\n")); return tiError; } if (TMtdIORequestBody == agNULL) { TI_DBG1(("tiINITaskManagement: ostiAllocMemory returned NULL TMIORequestBody\n")); return tiError; } /* initialize */ osti_memset(TMtdIORequestBody, 0, sizeof(tdIORequestBody_t)); /* setup task management structure */ TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle; TMtdIORequestBody->IOType.InitiatorTMIO.CurrentTaskTag = currentTaskTag; TMtdIORequestBody->IOType.InitiatorTMIO.TaskTag = taskTag; /* let's initialize tdIOrequestBody */ /* initialize jump table */ /* direct callback for task management */ TMtdIORequestBody->IOCompletionFunc = itdssTaskCompleted; /* to be removed */ /* TMtdIORequestBody->IOCompletionFunc = itdssIOCompleted; */ /* initialize tiDevhandle */ TMtdIORequestBody->tiDevHandle = tiDeviceHandle; /* initialize tiIORequest */ TMtdIORequestBody->tiIORequest = currentTaskTag; /* save context if we need to abort later */ currentTaskTag->tdData = TMtdIORequestBody; /* initialize agIORequest */ agIORequest = &(TMtdIORequestBody->agIORequest); agIORequest->osData = (void *) TMtdIORequestBody; agIORequest->sdkData = agNULL; /* SA takes care of this */ /* request type */ agRequestType = AGSA_SSP_TASK_MGNT_REQ; TMtdIORequestBody->agRequestType = AGSA_SSP_TASK_MGNT_REQ; /* initialize tdIORequestBody_t tdIORequestBody -> agSASRequestBody */ agSASRequestBody = &(TMtdIORequestBody->transport.SAS.agSASRequestBody); agSSPTaskMgntRequest = &(agSASRequestBody->sspTaskMgntReq); TI_DBG2(("tiINITaskManagement: did %d LUN reset for SAS\n", oneDeviceData->id)); /* fill up LUN field */ if (lun == agNULL) { osti_memset(agSSPTaskMgntRequest->lun, 0, 8); } else { 
osti_memcpy(agSSPTaskMgntRequest->lun, lun->lun, 8); } /* default: unconditionally set device state to SA_DS_IN_RECOVERY bit1 (DS) bit0 (ADS) bit1: 1 bit0: 0 */ agSSPTaskMgntRequest->tmOption = 2; /* sets taskMgntFunction field */ switch(task) { case AG_ABORT_TASK: agSSPTaskMgntRequest->taskMgntFunction = AGSA_ABORT_TASK; /* For abort task management, unconditionally set device state to SA_DS_IN_RECOVERY and if can't find, set device state to SA_DS_IN_RECOVERY bit1 (DS) bit0 (ADS) bit1: 1; bit0: 1 */ agSSPTaskMgntRequest->tmOption = 3; break; case AG_ABORT_TASK_SET: agSSPTaskMgntRequest->taskMgntFunction = AGSA_ABORT_TASK_SET; break; case AG_CLEAR_ACA: agSSPTaskMgntRequest->taskMgntFunction = AGSA_CLEAR_ACA; break; case AG_CLEAR_TASK_SET: agSSPTaskMgntRequest->taskMgntFunction = AGSA_CLEAR_TASK_SET; break; case AG_LOGICAL_UNIT_RESET: agSSPTaskMgntRequest->taskMgntFunction = AGSA_LOGICAL_UNIT_RESET; break; case AG_QUERY_TASK: agSSPTaskMgntRequest->taskMgntFunction = AGSA_QUERY_TASK; break; default: TI_DBG1(("tiINITaskManagement: notImplemented task\n")); break; } if (task == AGSA_ABORT_TASK || task == AGSA_QUERY_TASK) { /* set agTMRequest, which is IO being task managed */ tdIORequestBody = (tdIORequestBody_t *)taskTag->tdData; if (tdIORequestBody == agNULL) { /* to be aborted IO has been completed. */ /* free up allocated memory */ TI_DBG1(("tiINITaskManagement: IO has been completed\n")); ostiFreeMemory( tiRoot, osMemHandle, sizeof(tdIORequestBody_t) ); return tiIONoDevice; } else { agTMRequest = &(tdIORequestBody->agIORequest); } } else { /* For LUN RESET, WARM_RESET, ABORT_TASK_SET, CLEAR_ACA and CLEAR_TASK_SET no tag to be managed. Therefore, set it to zero. */ agSSPTaskMgntRequest->tagOfTaskToBeManaged = 0; agTMRequest = agNULL; } TDLIST_INIT_HDR(&TMtdIORequestBody->EsglPageList); /* debuggging */ if (TMtdIORequestBody->IOCompletionFunc == agNULL) { TI_DBG1(("tiINITaskManagement: Error!!!!! 
IOCompletionFunc is NULL\n")); } saStatus = saSSPStart(agRoot, agIORequest, /* task management itself */ tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, agRequestType, agSASRequestBody, /* task management itself */ agTMRequest, /* io to be aborted if exits */ &ossaSSPCompleted); if (saStatus == AGSA_RC_SUCCESS) { Initiator->NumIOsActive++; tiStatus = tiSuccess; } else { TI_DBG1(("tiINITaskManagement: saSSPStart failed 0x%x\n",saStatus)); /* free up allocated memory */ ostiFreeMemory( tiRoot, TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); if (saStatus == AGSA_RC_FAILURE) { tiStatus = tiError; } else { /* AGSA_RC_BUSY */ tiStatus = tiBusy; } } } } /* end of sas device */ #ifdef FDS_SM else if (oneDeviceData->DeviceType == TD_SATA_DEVICE) { agsaContext_t *agContext = agNULL; /* save the task tag in tdsaDeviceData_t structure, for handling PORT_RESET_COMPLETE hw event */ agContext = &(oneDeviceData->agDeviceResetContext); agContext->osData = currentTaskTag; #ifdef REMOVED /* for directly attached SATA, do localphycontrol for LUN and target reset, not smTaskManagement*/ if (oneDeviceData->directlyAttached == agTRUE && (task == AG_LOGICAL_UNIT_RESET || task == AG_TARGET_WARM_RESET)) { agRoot = oneDeviceData->agRoot; agDevHandle = oneDeviceData->agDevHandle; currentTaskTag->tdData = oneDeviceData; if (task == AG_LOGICAL_UNIT_RESET) { if ( (lun->lun[0] | lun->lun[1] | lun->lun[2] | lun->lun[3] | lun->lun[4] | lun->lun[5] | lun->lun[6] | lun->lun[7] ) != 0 ) { TI_DBG1(("tiINITaskManagement: *** REJECT *** LUN not zero, tiDeviceHandle=%p\n", tiDeviceHandle)); return tiError; } } saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, SA_DS_IN_RECOVERY); tiStatus = saLocalPhyControl(agRoot, agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->phyID, AGSA_PHY_HARD_RESET, agNULL); } else #endif { smRoot = &(tdsaAllShared->smRoot); smDeviceHandle = &(oneDeviceData->smDeviceHandle); 
TI_DBG1(("tiINITaskManagement: FDS_SM SATA Device\n")); if ( taskTag != agNULL) { ToBeAbortedtdIORequestBody = (tdIORequestBody_t *)taskTag->tdData; ToBeAborted = &(ToBeAbortedtdIORequestBody->smIORequest); } SMmemAllocStatus = ostiAllocMemory( tiRoot, &SMosMemHandle, (void **)&SMTMtdIORequestBody, &SMPhysUpper32, &SMPhysLower32, 8, sizeof(tdIORequestBody_t), agTRUE ); if (SMmemAllocStatus != tiSuccess) { TI_DBG1(("tiINITaskManagement: ostiAllocMemory failed... loc 2\n")); return tiError; } if (SMTMtdIORequestBody == agNULL) { TI_DBG1(("tiINITaskManagement: ostiAllocMemory returned NULL TMIORequestBody loc 2\n")); return tiError; } /* initialize */ osti_memset(SMTMtdIORequestBody, 0, sizeof(tdIORequestBody_t)); /* setup task management structure */ SMTMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle = SMosMemHandle; SMTMtdIORequestBody->IOType.InitiatorTMIO.CurrentTaskTag = currentTaskTag; SMTMtdIORequestBody->IOType.InitiatorTMIO.TaskTag = taskTag; /* initialize tiDevhandle */ SMTMtdIORequestBody->tiDevHandle = tiDeviceHandle; /* initialize tiIORequest */ SMTMtdIORequestBody->tiIORequest = currentTaskTag; /* save context if we need to abort later */ currentTaskTag->tdData = SMTMtdIORequestBody; TaskManagement = &(SMTMtdIORequestBody->smIORequest); TaskManagement->tdData = SMTMtdIORequestBody; TaskManagement->smData = &SMTMtdIORequestBody->smIORequestBody; tiStatus = smTaskManagement(smRoot, smDeviceHandle, task, (smLUN_t*)lun, ToBeAborted, TaskManagement ); if (tiStatus != SM_RC_SUCCESS) { TI_DBG1(("tiINITaskManagement: smTaskManagement failed... 
loc 2\n")); /* free up allocated memory */ ostiFreeMemory( tiRoot, SMTMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); } } /* else */ } #else else if (oneDeviceData->DeviceType == TD_SATA_DEVICE) { agRoot = oneDeviceData->agRoot; agDevHandle = oneDeviceData->agDevHandle; TI_DBG1(("tiINITaskManagement: not FDS_SM SATA Device\n")); /* WARM_RESET is experimental Needs more testing and debugging Soft reset for SATA as LUN RESET tends not to work. Let's do hard reset */ if (task == AG_LOGICAL_UNIT_RESET || task == AG_TARGET_WARM_RESET) { agsaContext_t *agContext; satDeviceData_t *satDevData; tdsaDeviceData_t *tdsaDeviceData; TI_DBG2(("tiINITaskManagement: did %d LUN reset or device reset for SATA\n", oneDeviceData->id)); tdsaDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; satDevData = &tdsaDeviceData->satDevData; currentTaskTag->tdData = tdsaDeviceData; agContext = &(tdsaDeviceData->agDeviceResetContext); agContext->osData = currentTaskTag; if (task == AG_LOGICAL_UNIT_RESET) { if ( (lun->lun[0] | lun->lun[1] | lun->lun[2] | lun->lun[3] | lun->lun[4] | lun->lun[5] | lun->lun[6] | lun->lun[7] ) != 0 ) { TI_DBG1(("tiINITaskManagement: *** REJECT *** LUN not zero, tiDeviceHandle=%p\n", tiDeviceHandle)); return tiError; } /* * Check if there is other TM request pending */ if (satDevData->satTmTaskTag != agNULL) { TI_DBG1(("tiINITaskManagement: *** REJECT *** other TM pending, tiDeviceHandle=%p\n", tiDeviceHandle)); return tiError; } } satDevData->satDriveState = SAT_DEV_STATE_IN_RECOVERY; satDevData->satAbortAfterReset = agFALSE; saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, SA_DS_IN_RECOVERY); /* warm reset by saLocalPhyControl or SMP PHY control */ if (oneDeviceData->directlyAttached == agTRUE) { TI_DBG1(("tiINITaskManagement: LUN reset or device reset directly attached\n")); saLocalPhyControl(agRoot, agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->phyID, 
AGSA_PHY_HARD_RESET, agNULL); return tiSuccess; } else { TI_DBG1(("tiINITaskManagement: LUN reset or device reset expander attached\n")); saStatus = tdsaPhyControlSend(tiRoot, oneDeviceData, SMP_PHY_CONTROL_HARD_RESET, currentTaskTag, tdsaRotateQnumber(tiRoot, oneDeviceData) ); return saStatus; } } else { TI_DBG2(("tiINITaskManagement: calling satTM().\n")); /* allocation tdIORequestBody and pass it to satTM() */ memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&TMtdIORequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdIORequestBody_t), agTRUE ); if (memAllocStatus != tiSuccess) { TI_DBG1(("tiINITaskManagement: ostiAllocMemory failed... loc 2\n")); return tiError; } if (TMtdIORequestBody == agNULL) { TI_DBG1(("tiINITaskManagement: ostiAllocMemory returned NULL TMIORequestBody loc 2\n")); return tiError; } /* initialize */ osti_memset(TMtdIORequestBody, 0, sizeof(tdIORequestBody_t)); /* setup task management structure */ TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle; TMtdIORequestBody->IOType.InitiatorTMIO.CurrentTaskTag = currentTaskTag; TMtdIORequestBody->IOType.InitiatorTMIO.TaskTag = taskTag; /* initialize tiDevhandle */ TMtdIORequestBody->tiDevHandle = tiDeviceHandle; /* initialize tiIORequest */ TMtdIORequestBody->tiIORequest = currentTaskTag; /* save context if we need to abort later */ currentTaskTag->tdData = TMtdIORequestBody; /* initialize agIORequest */ agIORequest = &(TMtdIORequestBody->agIORequest); agIORequest->osData = (void *) TMtdIORequestBody; agIORequest->sdkData = agNULL; /* SA takes care of this */ #ifdef SATA_ENABLE tiStatus = satTM( tiRoot, tiDeviceHandle, task, lun, taskTag, currentTaskTag, TMtdIORequestBody, agTRUE ); #endif } } #endif /* FDS_SM else*/ return tiStatus; } #endif /* INITIATOR_DRIVER */ #ifdef PASSTHROUGH osGLOBAL bit32 tiCOMPassthroughCmndStart( tiRoot_t *tiRoot, tiPassthroughRequest_t *tiPassthroughRequest, tiDeviceHandle_t *tiDeviceHandle, tiPassthroughCmnd_t *tiPassthroughCmnd, void 
*tiPassthroughBody,
  tiPortalContext_t *tiportalContext,
  ostiPassthroughCmndEvent_t agEventCB
  )
{
  tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdsaDeviceData_t *oneDeviceData;
  agsaRoot_t *agRoot = agNULL;
  agsaIORequest_t *agIORequest = agNULL;
  agsaDevHandle_t *agDevHandle = agNULL;
  bit32 agRequestType;
  agsaSASRequestBody_t *agSASRequestBody = agNULL;
  tdPassthroughCmndBody_t *tdPTCmndBody;
  tdssSMPRequestBody_t *tdssSMPRequestBody;
  agsaSMPFrame_t *agSMPFrame;
  agsaSSPVSFrame_t *agSSPVendorFrame; /* RMC */
  bit32 SMPFn, SMPFnResult, SMPFrameLen;
  bit32 tiStatus = tiError;
  bit32 saStatus = AGSA_RC_FAILURE;
  tdsaPortStartInfo_t *tdsaPortStartInfo;
  tdsaPortContext_t *tdsaPortContext;

  TI_DBG2(("tiCOMPassthroughCmndStart: start\n"));

  oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
  TI_DBG6(("tiCOMPassthroughCmndStart: onedevicedata %p\n", oneDeviceData));

  tdPTCmndBody = (tdPassthroughCmndBody_t *)tiPassthroughBody;

  /* NOTE(review): this condition is always true -- a command value cannot be
     equal to both tiSMPCmnd and tiRMCCmnd at once, so the function rejects
     every command with tiNotSupported. The intent was almost certainly '&&'
     rather than '||'; confirm before changing. */
  if (tiPassthroughCmnd->passthroughCmnd != tiSMPCmnd || tiPassthroughCmnd->passthroughCmnd != tiRMCCmnd)
  {
    return tiNotSupported;
  }

  /* A NULL device is tolerated only for SMP commands, which may be issued
     through the portal context rather than a registered device. */
  if (oneDeviceData == agNULL && tiPassthroughCmnd->passthroughCmnd != tiSMPCmnd)
  {
    TI_DBG1(("tiCOMPassthroughCmndStart: tiDeviceHandle=%p DeviceData is NULL\n", tiDeviceHandle ));
    return tiIONoDevice;
  }

  /* starting IO with SAS device */
  /* NOTE(review): oneDeviceData may still be agNULL here for an SMP command
     (see the guard above), so this dereference can fault; the NULL case is
     only handled inside the SMP branch below -- verify. */
  if (oneDeviceData->DeviceType == TD_SAS_DEVICE)
  {
    if (tiPassthroughCmnd->passthroughCmnd == tiSMPCmnd)
    {
      TI_DBG2(("tiCOMPassthroughCmndStart: SMP\n"));
      /* Without a device, resolve agRoot through the portal's port context. */
      if (oneDeviceData == agNULL)
      {
        tdsaPortStartInfo = (tdsaPortStartInfo_t *)tiportalContext->tdData;
        tdsaPortContext = tdsaPortStartInfo->portContext;
        agRoot = tdsaPortContext->agRoot;
      }
      else
      {
        agRoot = oneDeviceData->agRoot;
        agDevHandle = oneDeviceData->agDevHandle;
      }
      tdssSMPRequestBody = &(tdPTCmndBody->protocol.SMP.SMPBody);
      agSASRequestBody = &(tdssSMPRequestBody->agSASRequestBody);
      agSMPFrame = &(agSASRequestBody->smpFrame);
      /* saves callback function */
tdPTCmndBody->EventCB = agEventCB; /* initialize command type */ tdPTCmndBody->tiPassthroughCmndType = tiSMPCmnd; /* initialize tipassthroughrequest */ tdPTCmndBody->tiPassthroughRequest = tiPassthroughRequest; tiPassthroughRequest->tdData = tdPTCmndBody; /* initialize tiDevhandle */ tdPTCmndBody->tiDevHandle = tiDeviceHandle; /* fill in SMP header */ agSMPFrame->frameHeader.smpFrameType = tiPassthroughCmnd->protocol.SMP.SMPHeader.smpFrameType; agSMPFrame->frameHeader.smpFunction = tiPassthroughCmnd->protocol.SMP.SMPHeader.smpFunction; agSMPFrame->frameHeader.smpFunctionResult = tiPassthroughCmnd->protocol.SMP.SMPHeader.smpFunctionResult; agSMPFrame->frameHeader.smpReserved = tiPassthroughCmnd->protocol.SMP.SMPHeader.smpReserved; if (tiPassthroughCmnd->protocol.SMP.IT == SMP_INITIATOR) { agRequestType = AGSA_SMP_INIT_REQ; } else { agRequestType = AGSA_SMP_TGT_RESPONSE; /* this is only for SMP target */ agSMPFrame->phyId = tiPassthroughCmnd->protocol.SMP.phyID; } /* fill in payload */ /* assumption: SMP payload is in tisgl1 */ agSMPFrame->frameAddrUpper32 = tiPassthroughCmnd->tiSgl.upper; agSMPFrame->frameAddrLower32 = tiPassthroughCmnd->tiSgl.lower; /* This length excluding SMP header (4 bytes) and CRC field */ agSMPFrame->frameLen = tiPassthroughCmnd->tiSgl.len; /* initialize agIORequest */ /* Compare: tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData; */ agIORequest = &(tdssSMPRequestBody->agIORequest); agIORequest->osData = (void *) tdPTCmndBody; agIORequest->sdkData = agNULL; /* LL takes care of this */ /* not work yet because of high priority q */ saStatus = saSMPStart( agRoot, agIORequest, agDevHandle, agRequestType, agSASRequestBody, &ossaSMPCompleted ); if (saStatus == AGSA_RC_SUCCESS) { tiStatus = tiSuccess; } else if (saStatus == AGSA_RC_FAILURE) { TI_DBG1(("tiCOMPassthroughCmndStart: saSMPStart failed\n")); tiStatus = tiError; } else { /* AGSA_RC_BUSY */ TI_DBG1(("tiCOMPassthroughCmndStart: saSMPStart busy\n")); tiStatus = tiBusy; } return 
tiStatus; #ifdef TO_DO /* fill in SMP header */ if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { agSMPFrame->frameHeader.smpFrameType = SMP_REQUEST; /* SMP REQUEST */ agRequestType = AGSA_SMP_INIT_REQ; } else { /* SMP target */ agSMPFrame->frameHeader.smpFrameType = SMP_RESPONSE; /* SMP RESPONSE */ agRequestType = AGSA_SMP_TGT_RESPONSE; switch (tdPTCmndBody->protocol.SMP.SMPFnResult) { case tiSMPFunctionAccepted: SMPFnResult = SMP_FUNCTION_ACCEPTED; break; case tiUnknownSMPFunction: SMPFnResult = UNKNOWN_SMP_FUNCTION; break; case tiSMPFunctionFailed: SMPFnResult = SMP_FUNCTION_FAILED; break; case tiInvalidRequestFrameLength: SMPFnResult = INVALID_REQUEST_FRAME_LENGTH; break; case tiPhyDoesNotExist: SMPFnResult =PHY_DOES_NOT_EXIST; break; case tiIndexDoesNotExist: SMPFnResult = INDEX_DOES_NOT_EXIST; break; case tiPhyDoesNotSupportSATA: SMPFnResult = PHY_DOES_NOT_SUPPORT_SATA; break; case tiUnknownPhyOperation: SMPFnResult = UNKNOWN_PHY_OPERATION; break; case tiUnknownPhyTestFunction: SMPFnResult = UNKNOWN_PHY_TEST_FUNCTION; break; case tiPhyTestFunctionInProgress: SMPFnResult = PHY_TEST_FUNCTION_IN_PROGRESS; break; case tiPhyVacant: SMPFnResult = PHY_VACANT; break; default: TI_DBG1(("tiCOMPassthroughCmndStart: unknown SMP function result %d\n", tdPTCmndBody->protocol.SMP.SMPFnResult)); return tiError; } agSMPFrame->frameHeader.smpFunctionResult = SMPFnResult; } /* common */ switch (tdPTCmndBody->protocol.SMP.SMPFn) { case tiGeneral: SMPFn = SMP_REPORT_GENERAL; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = 0; } else { SMPFrameLen = sizeof(smpRespReportGeneral_t); } break; case tiManufacturerInfo: SMPFn = SMP_REPORT_MANUFACTURE_INFORMATION; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = 0; } else { SMPFrameLen = sizeof(smpRespReportManufactureInfo_t); } break; case tiDiscover: SMPFn = SMP_DISCOVER; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = sizeof(smpReqDiscover_t); } else { SMPFrameLen = 
sizeof(smpRespDiscover_t); } break; case tiReportPhyErrLog: SMPFn = SMP_REPORT_PHY_ERROR_LOG; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = 8; } else { SMPFrameLen = 24; } break; case tiReportPhySATA: SMPFn = SMP_REPORT_PHY_SATA; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = sizeof(SmpReqReportPhySata_t); } else { SMPFrameLen = sizeof(SmpRespReportPhySata_t); } break; case tiReportRteInfo: SMPFn = SMP_REPORT_ROUTING_INFORMATION; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = sizeof(SmpReqReportRouteTable_t); } else { SMPFrameLen = sizeof(SmpRespReportRouteTable_t); } break; case tiConfigureRteInfo: - SMPFn = SMP_CONFIGURE_ROUTING_INFORMATION;; + SMPFn = SMP_CONFIGURE_ROUTING_INFORMATION; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = sizeof(SmpReqConfigureRouteInformation_t); } else { SMPFrameLen = 0; } break; case tiPhyCtrl: SMPFn = SMP_PHY_CONTROL; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = sizeof(SmpReqPhyControl_t); } else { SMPFrameLen = 0; } break; case tiPhyTestFn: SMPFn = SMP_PHY_TEST_FUNCTION; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = 36; } else { SMPFrameLen = 0; } break; case tiPMC: SMPFn = SMP_PMC_SPECIFIC; if (tdPTCmndBody->protocol.SMP.IT == SMP_INITIATOR) { SMPFrameLen = 0; } else { SMPFrameLen = 0; } break; default: TI_DBG1(("tiCOMPassthroughCmndStart: unknown SMP function %d\n", tdPTCmndBody->protocol.SMP.SMPFn)); return tiError; } agSMPFrame->frameHeader.smpFunction = SMPFn; /* assumption: SMP payload is in tisgl1 */ agSMPFrame->frameAddrUpper32 = tdPTCmndBody->tiSgl.upper; agSMPFrame->frameAddrLower32 = tdPTCmndBody->tiSgl.lower; /* This length excluding SMP header (4 bytes) and CRC field */ agSMPFrame->frameLen = SMPFrameLen; #endif } else if (tiPassthroughCmnd->passthroughCmnd == tiRMCCmnd) { TI_DBG2(("tiCOMPassthroughCmndStart: RMC\n")); } else { TI_DBG1(("tiCOMPassthroughCmndStart: unknown protocol %d\n", 
tiPassthroughCmnd->passthroughCmnd)); } } else if (oneDeviceData->DeviceType == TD_SATA_DEVICE) { TI_DBG1(("tiCOMPassthroughCmndStart: error !!! no SATA support\n")); return tiError; } else { TI_DBG1(("tiCOMPassthroughCmndStart: error !!! unknown devietype %d\n", oneDeviceData->DeviceType)); return tiError; } return tiSuccess; } osGLOBAL bit32 tiCOMPassthroughCmndAbort( tiRoot_t *tiRoot, tiPassthroughRequest_t *taskTag ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; agsaRoot_t *agRoot = agNULL; tdPassthroughCmndBody_t *tdPTCmndBody = agNULL; tdssSMPRequestBody_t *tdssSMPRequestBody = agNULL; agsaIORequest_t *agIORequest = agNULL; bit32 saStatus, tiStatus = tiError; TI_DBG2(("tiCOMPassthroughCmndAbort: start\n")); agRoot = &(tdsaAllShared->agRootNonInt); tdPTCmndBody = (tdPassthroughCmndBody_t *)taskTag->tdData; if (tdPTCmndBody->tiPassthroughCmndType == tiSMPCmnd) { tdssSMPRequestBody = &(tdPTCmndBody->protocol.SMP.SMPBody); agIORequest = &(tdssSMPRequestBody->agIORequest); saStatus = saSMPAbort(agRoot, agIORequest); if (saStatus == AGSA_RC_SUCCESS) { tiStatus = tiSuccess; } else if (saStatus == AGSA_RC_FAILURE) { TI_DBG1(("tiCOMPassthroughCmndAbort: saSMPAbort failed\n")); tiStatus = tiError; } else { /* AGSA_RC_BUSY */ TI_DBG1(("tiCOMPassthroughCmndAbort: saSMPAbort busy\n")); tiStatus = tiBusy; } return tiStatus; } else if (tdPTCmndBody->tiPassthroughCmndType == tiRMCCmnd) { TI_DBG1(("tiCOMPassthroughCmndAbort: RMC passthrough command type, not yet\n")); } else { TI_DBG1(("tiCOMPassthroughCmndAbort: unknown passthrough command type %d\n", tdPTCmndBody->tiPassthroughCmndType)); return tiStatus; } } osGLOBAL bit32 tiINIPassthroughCmndRemoteAbort( tiRoot_t *tiRoot, tiDeviceHandle_t *tiDeviceHandle, tiPassthroughRequest_t *taskTag, tiPassthroughRequest_t *currentTaskTag, tiPortalContext_t *tiportalContext ) { TI_DBG2(("tiINIPassthroughCmndRemoteAbort: start\n")); /* for SMP, nothing. 
Can't abort remotely */
  return tiSuccess;
}
#endif /* PASSTHROUGH */

/*****************************************************************************
*! \brief tiCOMShutDown
*
*  Purpose: This function is called to shutdown the initiator and/or target
*           operation. Following the completion of this call, the state is
*           equivalent to the state prior to tiCOMInit()
*
*  \param tiRoot:  Pointer to root data structure.
*
*  \return None
*
*
*****************************************************************************/
osGLOBAL void
tiCOMShutDown( tiRoot_t *tiRoot)
{
  tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
// #define TI_GETFOR_ONSHUTDOWN
#ifdef TI_GETFOR_ONSHUTDOWN
  /* Forensic-dump locals; compiled only when the debug switch above is on. */
  agsaForensicData_t forensicData;
  bit32 once = 1;
  bit32 status;
#endif /* TI_GETFOR_ONSHUTDOWN */
  agsaRoot_t *agRoot = agNULL;

  TI_DBG1(("tiCOMShutDown: start\n"));

  agRoot = &(tdsaAllShared->agRootNonInt);
  /*
  1. free up cardID
  2. call saHwShutdown()
  3. tdInitEsgl(tiRoot);
  4. tdsaResetComMemFlags(tiRoot)
  5. ostiPortEvent()
  */
  tdsaFreeCardID(tiRoot, tdsaAllShared->CardID);

#ifdef TI_GETFOR_ONSHUTDOWN
  forensicData.DataType = TYPE_NON_FATAL;
  forensicData.dataBuf.directLen =  (8 * 1024);
  forensicData.dataBuf.directOffset = 0; /* current offset */
  forensicData.dataBuf.directData = agNULL;
  forensicData.dataBuf.readLen = 0;   /* Data read */
  /* NOTE(review): 'once' is set to 1 and never cleared, so it does not limit
     this loop; termination relies on readLen != directLen or a non-zero
     status from saGetForensicData -- confirm intent. */
getmoreData:
  status = saGetForensicData( agRoot, agNULL, &forensicData);
  TI_DBG1(("tiCOMShutDown:readLen 0x%x directLen 0x%x directOffset 0x%x\n", forensicData.dataBuf.readLen, forensicData.dataBuf.directLen, forensicData.dataBuf.directOffset));
  if( forensicData.dataBuf.readLen == forensicData.dataBuf.directLen && !status && once)
  {
    goto getmoreData;
  }
  TI_DBG1(("tiCOMShutDown:saGetForensicData type %d read 0x%x bytes\n", forensicData.DataType, forensicData.dataBuf.directOffset ));
#endif /* TI_GETFOR_ONSHUTDOWN */

  saHwShutdown(agRoot);

  /* resets all the relevant flags */
  tdsaResetComMemFlags(tiRoot);

  /*
   * send an event to the oslayer
   */
  ostiPortEvent (
                 tiRoot,
                 tiPortShutdown,
                 tiSuccess,
                 agNULL
                 );
  return;
}

#ifdef INITIATOR_DRIVER
osGLOBAL void
tiINITimerTick( tiRoot_t *tiRoot )
{
  /* no timer is used in SAS TD layer. Therefore, this function is null. */
  //  TI_DBG2(("tiINITimerTick: start\n"));
  /*itdsaProcessTimers(tiRoot);*/
  return;
}
#endif

/*****************************************************************************/
/*! \brief ossaDisableInterrupts
*
*
*  Purpose: This routine is called to disable interrupt
*
*
*  \param  agRoot:               Pointer to chip/driver Instance.
*  \param  outboundChannelNum:   Zero-base channel number
*
*
*  \return None.
*
*  \note - The scope is shared target and initiator.
*
*/
/*****************************************************************************/
#ifndef ossaDisableInterrupts
osGLOBAL void
ossaDisableInterrupts(
  agsaRoot_t *agRoot,
  bit32 outboundChannelNum
  )
{
  tdsaRootOsData_t *osData = (tdsaRootOsData_t *) (agRoot->osData);

  /* Hand off to the OS layer to mask the given outbound interrupt channel. */
  ostiInterruptDisable(
                       osData->tiRoot,
                       outboundChannelNum
                       );
  return;
}
#endif

/* Copies FrameBufLen bytes of a received frame, starting at FrameOffset,
   into FrameBuffer via the LL layer's saFrameReadBlock(); always uses the
   non-interrupt agRoot instance. */
osGLOBAL void
tiCOMFrameReadBlock(
  tiRoot_t *tiRoot,
  void *agFrame,
  bit32 FrameOffset,
  void *FrameBuffer,
  bit32 FrameBufLen
  )
{
  tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t *agRoot = agNULL;

  TI_DBG6(("tiCOMFrameReadBlock: start\n"));

  agRoot = &(tdsaAllShared->agRootNonInt);

  /* NOTE(review): duplicate "start" trace below -- looks like a copy/paste
     leftover; harmless but could be removed. */
  TI_DBG6(("tiCOMFrameReadBlock: start\n"));

  saFrameReadBlock(agRoot, agFrame, FrameOffset, FrameBuffer, FrameBufLen);

  return;
}

/*****************************************************************************
*! \brief tiINITransportRecovery
*
*  Purpose: This routine is called to explicitly ask the Transport Dependent
*           Layer to initiate the recovery for the transport/protocol specific
*           error for a specific device connection.
*
*  \param   tiRoot: Pointer to driver instance
*  \param   tiDeviceHandle: Pointer to the device handle for this session.
*
*  \return: None
*
*
*****************************************************************************/
#ifdef INITIATOR_DRIVER
osGLOBAL void
tiINITransportRecovery (
  tiRoot_t *tiRoot,
  tiDeviceHandle_t *tiDeviceHandle
  )
{
  agsaRoot_t *agRoot = agNULL;
  tdsaDeviceData_t *oneDeviceData = agNULL;
  tdsaPortContext_t *onePortContext = agNULL;
  tiPortalContext_t *tiPortalContext = agNULL;
  tiIORequest_t *currentTaskTag;
  agsaDevHandle_t *agDevHandle = agNULL;

  TI_DBG1(("tiINITransportRecovery: start\n"));

  if (tiDeviceHandle == agNULL)
  {
    TI_DBG1(("tiINITransportRecovery: tiDeviceHandle is NULL\n"));
    return;
  }
  oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
  if (oneDeviceData == agNULL)
  {
    TI_DBG1(("tiINITransportRecovery: oneDeviceData is NULL\n"));
    return;
  }
  /* for hotplug: reject devices that are gone or not (yet) registered */
  if (oneDeviceData->valid != agTRUE ||
      oneDeviceData->registered != agTRUE ||
      oneDeviceData->tdPortContext == agNULL )
  {
    TI_DBG1(("tiINITransportRecovery: NO Device did %d\n", oneDeviceData->id ));
    TI_DBG1(("tiINITransportRecovery: device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
    TI_DBG1(("tiINITransportRecovery: device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
    return;
  }
  onePortContext = oneDeviceData->tdPortContext;
  if (onePortContext == agNULL)
  {
    TI_DBG1(("tiINITransportRecovery: onePortContext is NULL\n"));
    return;
  }
  tiPortalContext = onePortContext->tiPortalContext;
  /* Recovery-IO bookkeeping lives inside the device structure itself. */
  currentTaskTag = &(oneDeviceData->TransportRecoveryIO);
  currentTaskTag->osData = agNULL;
  agRoot = oneDeviceData->agRoot;
  agDevHandle = oneDeviceData->agDevHandle;

  if (oneDeviceData->DeviceType == TD_SAS_DEVICE)
  {
    agsaContext_t *agContext;
    currentTaskTag->tdData = oneDeviceData;
    agContext = &(oneDeviceData->agDeviceResetContext);
    agContext->osData = currentTaskTag;
    oneDeviceData->TRflag = agTRUE;

    TI_DBG2(("tiINITransportRecovery: SAS device\n"));
    saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, SA_DS_IN_RECOVERY);

    /* Directly attached: hard-reset the local phy. Expander attached:
       ask the expander via SMP PHY CONTROL (hard reset). Either way,
       notify the OS layer that recovery has started. */
    if (oneDeviceData->directlyAttached == agTRUE)
    {
      TI_DBG2(("tiINITransportRecovery: saLocalPhyControl\n"));
      saLocalPhyControl(agRoot, agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->phyID, AGSA_PHY_HARD_RESET, agNULL);
      ostiInitiatorEvent(tiRoot,
                         tiPortalContext,
                         tiDeviceHandle,
                         tiIntrEventTypeTransportRecovery,
                         tiRecStarted,
                         agNULL
                         );
      return;
    }
    else
    {
      TI_DBG2(("tiINITransportRecovery: device reset expander attached\n"));
      tdsaPhyControlSend(tiRoot, oneDeviceData, SMP_PHY_CONTROL_HARD_RESET, currentTaskTag, tdsaRotateQnumber(tiRoot, oneDeviceData) );
      ostiInitiatorEvent(tiRoot,
                         tiPortalContext,
                         tiDeviceHandle,
                         tiIntrEventTypeTransportRecovery,
                         tiRecStarted,
                         agNULL
                         );
      return;
    }
  }
  else if (oneDeviceData->DeviceType == TD_SATA_DEVICE)
  {
    agsaContext_t *agContext;
    currentTaskTag->tdData = oneDeviceData;
    agContext = &(oneDeviceData->agDeviceResetContext);
    agContext->osData = currentTaskTag;
    oneDeviceData->TRflag = agTRUE;

    TI_DBG2(("tiINITransportRecovery: SATA device\n"));
    saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, SA_DS_IN_RECOVERY);

    /* SATA recovery uses a link reset rather than a hard reset. */
    if (oneDeviceData->directlyAttached == agTRUE)
    {
      TI_DBG2(("tiINITransportRecovery: saLocalPhyControl\n"));
      saLocalPhyControl(agRoot, agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), oneDeviceData->phyID, AGSA_PHY_LINK_RESET, agNULL);
      ostiInitiatorEvent(tiRoot,
                         tiPortalContext,
                         tiDeviceHandle,
                         tiIntrEventTypeTransportRecovery,
                         tiRecStarted,
                         agNULL
                         );
      return;
    }
    else
    {
      TI_DBG2(("tiINITransportRecovery: device reset expander attached\n"));
      tdsaPhyControlSend(tiRoot, oneDeviceData, SMP_PHY_CONTROL_LINK_RESET, currentTaskTag, tdsaRotateQnumber(tiRoot, oneDeviceData) );
      ostiInitiatorEvent(tiRoot,
                         tiPortalContext,
                         tiDeviceHandle,
                         tiIntrEventTypeTransportRecovery,
                         tiRecStarted,
                         agNULL
                         );
      return;
    }
  }
  else
  {
    TI_DBG1(("tiINITransportRecovery: wrong device type %d\n", oneDeviceData->DeviceType));
  }

  return;
}
#endif

#if defined (INITIATOR_DRIVER) && defined (TARGET_DRIVER)
/*****************************************************************************
*! \brief tdsaPhyControlSend
*
*  Purpose: This function sends Phy Control to a device.
*           (Stub built when both INITIATOR_DRIVER and TARGET_DRIVER are
*           defined; it performs no work and always returns 0.)
*
*  \param   tiRoot: Pointer to the OS Specific module allocated tiRoot_t
*                   instance.
*  \param   oneDeviceData: Pointer to the device data.
*  \param   phyOp: SMP PHY CONTROL phy operation code.
*  \param   CurrentTaskTag: OS layer task tag (agNULL for clear affiliation).
*  \param   queueNumber: bits 0-15:  inbound queue number.
*                        bits 16-31: outbound queue number.
*
*  \return:
*           Status
*
*  \note:
*
*****************************************************************************/
/* phyop of interest
SMP_PHY_CONTROL_HARD_RESET or SMP_PHY_CONTROL_CLEAR_AFFILIATION
if CurrentTaskTag == agNULL, clear affiliation
if CurrentTaskTag != agNULL, PHY_CONTROL (device reset)
*/
osGLOBAL bit32
tdsaPhyControlSend(
  tiRoot_t *tiRoot,
  tdsaDeviceData_t *oneDeviceData, /* target disk */
  bit8 phyOp,
  tiIORequest_t *CurrentTaskTag,
  bit32 queueNumber
  )
{
  return 0;
}
#endif

#ifdef TARGET_DRIVER
/*****************************************************************************
*! \brief tdsaPhyControlSend
*
*  Purpose: This function sends Phy Control to a device.
*           (Stub built for the target-only driver; it performs no work and
*           always returns 0.)
*
*  \param   tiRoot: Pointer to the OS Specific module allocated tiRoot_t
*                   instance.
*  \param   oneDeviceData: Pointer to the device data.
*  \param   phyOp: SMP PHY CONTROL phy operation code.
*  \param   CurrentTaskTag: OS layer task tag (agNULL for clear affiliation).
*  \param   queueNumber: bits 0-15:  inbound queue number.
*                        bits 16-31: outbound queue number.
*
*  \return:
*           Status
*
*  \note:
*
*****************************************************************************/
/* phyop of interest
SMP_PHY_CONTROL_HARD_RESET or SMP_PHY_CONTROL_CLEAR_AFFILIATION
if CurrentTaskTag == agNULL, clear affiliation
if CurrentTaskTag != agNULL, PHY_CONTROL (device reset)
*/
osGLOBAL bit32
tdsaPhyControlSend(
  tiRoot_t *tiRoot,
  tdsaDeviceData_t *oneDeviceData, /* target disk */
  bit8 phyOp,
  tiIORequest_t *CurrentTaskTag,
  bit32 queueNumber
  )
{
  return 0;
}
#endif

#ifdef INITIATOR_DRIVER
/*****************************************************************************
*!
\brief tdsaPhyControlSend
*
*  Purpose: This function sends Phy Control to a device.
*
*  \param   tiRoot: Pointer to the OS Specific module allocated tiRoot_t
*                   instance.
*  \param   oneDeviceData: Pointer to the device data (the target disk).
*  \param   phyOp: SMP PHY CONTROL phy operation code.
*  \param   CurrentTaskTag: OS layer task tag (agNULL for clear affiliation).
*  \param   queueNumber: bits 0-15:  inbound queue number.
*                        bits 16-31: outbound queue number.
*
*  \return:
*           Status
*
*  \note:
*
*****************************************************************************/
/* phyop of interest
SMP_PHY_CONTROL_HARD_RESET or SMP_PHY_CONTROL_CLEAR_AFFILIATION
if CurrentTaskTag == agNULL, clear affiliation
if CurrentTaskTag != agNULL, PHY_CONTROL (device reset)
*/
osGLOBAL bit32
tdsaPhyControlSend(
  tiRoot_t *tiRoot,
  tdsaDeviceData_t *oneDeviceData, /* target disk */
  bit8 phyOp,
  tiIORequest_t *CurrentTaskTag,
  bit32 queueNumber
  )
{
  agsaRoot_t *agRoot;
  tdsaDeviceData_t *oneExpDeviceData;
  tdsaPortContext_t *onePortContext;
  smpReqPhyControl_t smpPhyControlReq;
  bit8 phyID;
  bit32 status;

  TI_DBG3(("tdsaPhyControlSend: start\n"));

  agRoot = oneDeviceData->agRoot;
  onePortContext = oneDeviceData->tdPortContext;
  oneExpDeviceData = oneDeviceData->ExpDevice; /* expander the disk hangs off */
  phyID = oneDeviceData->phyID;

  /* Only expander-attached devices can be controlled through SMP. */
  if (oneDeviceData->directlyAttached == agTRUE)
  {
    TI_DBG1(("tdsaPhyControlSend: Error!!! deivce is directly attached\n"));
    return AGSA_RC_FAILURE;
  }
  if (onePortContext == agNULL)
  {
    TI_DBG1(("tdsaPhyControlSend: Error!!! portcontext is NULL\n"));
    return AGSA_RC_FAILURE;
  }
  if (oneExpDeviceData == agNULL)
  {
    TI_DBG1(("tdsaPhyControlSend: Error!!! expander is NULL\n"));
    return AGSA_RC_FAILURE;
  }
  if (phyOp == SMP_PHY_CONTROL_HARD_RESET)
  {
    TI_DBG3(("tdsaPhyControlSend: SMP_PHY_CONTROL_HARD_RESET\n"));
  }
  if (phyOp == SMP_PHY_CONTROL_LINK_RESET)
  {
    TI_DBG3(("tdsaPhyControlSend: SMP_PHY_CONTROL_LINK_RESET\n"));
  }
  if (phyOp == SMP_PHY_CONTROL_CLEAR_AFFILIATION)
  {
    TI_DBG3(("tdsaPhyControlSend: SMP_PHY_CONTROL_CLEAR_AFFILIATION\n"));
  }
  TI_DBG3(("tdsaPhyControlSend: target device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  TI_DBG3(("tdsaPhyControlSend: target device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));
  TI_DBG3(("tdsaPhyControlSend: expander AddrHi 0x%08x\n", oneExpDeviceData->SASAddressID.sasAddressHi));
  TI_DBG3(("tdsaPhyControlSend: expander AddrLo 0x%08x\n", oneExpDeviceData->SASAddressID.sasAddressLo));
  TI_DBG3(("tdsaPhyControlSend: did %d expander did %d phyid %d\n", oneDeviceData->id, oneExpDeviceData->id, phyID));

  osti_memset(&smpPhyControlReq, 0, sizeof(smpReqPhyControl_t));

  /* fill in SMP payload */
  smpPhyControlReq.phyIdentifier = phyID;
  smpPhyControlReq.phyOperation = phyOp;

  /* Ship the request to the owning expander as an SMP PHY CONTROL function. */
  status = tdSMPStart(
                      tiRoot,
                      agRoot,
                      oneExpDeviceData,
                      SMP_PHY_CONTROL,
                      (bit8 *)&smpPhyControlReq,
                      sizeof(smpReqPhyControl_t),
                      AGSA_SMP_INIT_REQ,
                      CurrentTaskTag,
                      queueNumber
                      );
  return status;
}
#endif

/*****************************************************************************
*! \brief tdsaPhyControlFailureRespRcvd
*
*  Purpose: This function processes the failure of Phy Control response.
*
*  \param   tiRoot: Pointer to the OS Specific module allocated tiRoot_t
*                   instance.
*  \param   agRoot: Pointer to chip/driver Instance.
*  \param   oneDeviceData: Pointer to the device data.
*  \param   frameHeader: Pointer to SMP frame header.
* \param frameHandle: A Handle used to refer to the response frame * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaPhyControlFailureRespRcvd( tiRoot_t *tiRoot, agsaRoot_t *agRoot, tdsaDeviceData_t *oneDeviceData, tdssSMPFrameHeader_t *frameHeader, agsaFrameHandle_t frameHandle, tiIORequest_t *CurrentTaskTag ) { #if defined(INITIATOR_DRIVER) || defined(TD_DEBUG_ENABLE) tdsaDeviceData_t *TargetDeviceData = agNULL; #endif #ifdef TD_DEBUG_ENABLE satDeviceData_t *pSatDevData = agNULL; #endif // agsaDevHandle_t *agDevHandle = agNULL; TI_DBG1(("tdsaPhyControlFailureRespRcvd: start\n")); TI_DBG3(("tdsaPhyControlFailureRespRcvd: expander device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaPhyControlFailureRespRcvd: expander device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); if (CurrentTaskTag != agNULL ) { /* This was set in tiINITaskmanagement() */ #if defined(INITIATOR_DRIVER) || defined(TD_DEBUG_ENABLE) TargetDeviceData = (tdsaDeviceData_t *)CurrentTaskTag->tdData; #endif #ifdef TD_DEBUG_ENABLE pSatDevData = (satDeviceData_t *)&(TargetDeviceData->satDevData); #endif // agDevHandle = TargetDeviceData->agDevHandle; TI_DBG2(("tdsaPhyControlFailureRespRcvd: target AddrHi 0x%08x\n", TargetDeviceData->SASAddressID.sasAddressHi)); TI_DBG2(("tdsaPhyControlFailureRespRcvd: target AddrLo 0x%08x\n", TargetDeviceData->SASAddressID.sasAddressLo)); #ifdef TD_DEBUG_ENABLE TI_DBG2(("tdsaPhyControlFailureRespRcvd: satPendingIO %d satNCQMaxIO %d\n", pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO )); TI_DBG2(("tdsaPhyControlFailureRespRcvd: satPendingNCQIO %d satPendingNONNCQIO %d\n", pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO)); #endif } #ifdef INITIATOR_DRIVER if (CurrentTaskTag != agNULL ) { TI_DBG1(("tdsaPhyControlRespRcvd: callback to OS layer with failure\n")); if (TargetDeviceData->TRflag == agTRUE) { TargetDeviceData->TRflag = 
agFALSE; ostiInitiatorEvent(tiRoot, TargetDeviceData->tdPortContext->tiPortalContext, &(TargetDeviceData->tiDeviceHandle), tiIntrEventTypeTransportRecovery, tiRecFailed , agNULL ); } else { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, CurrentTaskTag ); } } #endif return; } /***************************************************************************** *! \brief tdsaPhyControlRespRcvd * * Purpose: This function processes Phy Control response. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param agRoot: Pointer to chip/driver Instance. * \param oneDeviceData: Pointer to the device data. * \param frameHeader: Pointer to SMP frame header. * \param frameHandle: A Handle used to refer to the response frame * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaPhyControlRespRcvd( tiRoot_t *tiRoot, agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, tdsaDeviceData_t *oneDeviceData, tdssSMPFrameHeader_t *frameHeader, agsaFrameHandle_t frameHandle, tiIORequest_t *CurrentTaskTag ) { #if defined(INITIATOR_DRIVER) || defined(TD_DEBUG_ENABLE) tdsaDeviceData_t *TargetDeviceData = agNULL; #endif #ifdef INITIATOR_DRIVER satDeviceData_t *pSatDevData = agNULL; agsaDevHandle_t *agDevHandle = agNULL; #endif TI_DBG3(("tdsaPhyControlRespRcvd: start\n")); TI_DBG3(("tdsaPhyControlRespRcvd: expander device AddrHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaPhyControlRespRcvd: expander device AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); if (CurrentTaskTag != agNULL ) { /* This was set in tiINITaskmanagement() */ #if defined(INITIATOR_DRIVER) || defined(TD_DEBUG_ENABLE) TargetDeviceData = (tdsaDeviceData_t *)CurrentTaskTag->tdData; #endif #ifdef INITIATOR_DRIVER pSatDevData = (satDeviceData_t *)&(TargetDeviceData->satDevData); agDevHandle = TargetDeviceData->agDevHandle; #endif 
TI_DBG2(("tdsaPhyControlRespRcvd: target AddrHi 0x%08x\n", TargetDeviceData->SASAddressID.sasAddressHi)); TI_DBG2(("tdsaPhyControlRespRcvd: target AddrLo 0x%08x\n", TargetDeviceData->SASAddressID.sasAddressLo)); #ifdef INITIATOR_DRIVER TI_DBG2(("tdsaPhyControlRespRcvd: satPendingIO %d satNCQMaxIO %d\n", pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO )); TI_DBG2(("tdsaPhyControlRespRcvd: satPendingNCQIO %d satPendingNONNCQIO %d\n", pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO)); #endif } #ifdef INITIATOR_DRIVER /* no payload */ if (frameHeader->smpFunctionResult == SMP_FUNCTION_ACCEPTED) { TI_DBG3(("tdsaPhyControlRespRcvd: SMP success\n")); /* warm reset or clear affiliation is done call ostiInitiatorEvent() */ if (CurrentTaskTag != agNULL ) { TI_DBG3(("tdsaPhyControlRespRcvd: callback to OS layer with success\n")); pSatDevData->satDriveState = SAT_DEV_STATE_NORMAL; saSetDeviceState(agRoot, agNULL, tdsaRotateQnumber(tiRoot, TargetDeviceData), agDevHandle, SA_DS_OPERATIONAL); if (TargetDeviceData->TRflag == agTRUE) { TargetDeviceData->TRflag = agFALSE; ostiInitiatorEvent(tiRoot, TargetDeviceData->tdPortContext->tiPortalContext, &(TargetDeviceData->tiDeviceHandle), tiIntrEventTypeTransportRecovery, tiRecOK, agNULL ); } else { agDevHandle = TargetDeviceData->agDevHandle; if (agDevHandle == agNULL) { TI_DBG1(("tdsaPhyControlRespRcvd: wrong, agDevHandle is NULL\n")); } ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMOK, CurrentTaskTag ); } } } else { TI_DBG1(("tdsaPhyControlRespRcvd: SMP failure; result %d\n", frameHeader->smpFunctionResult)); /* warm reset or clear affiliation is done */ if (CurrentTaskTag != agNULL ) { TI_DBG1(("tdsaPhyControlRespRcvd: callback to OS layer with failure\n")); if (TargetDeviceData->TRflag == agTRUE) { TargetDeviceData->TRflag = agFALSE; ostiInitiatorEvent(tiRoot, TargetDeviceData->tdPortContext->tiPortalContext, &(TargetDeviceData->tiDeviceHandle), tiIntrEventTypeTransportRecovery, 
tiRecFailed , agNULL ); } else { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, CurrentTaskTag ); } } } #endif return; } #ifdef TARGET_DRIVER /***************************************************************************** *! \brief ttdsaAbortAll * * Purpose: This function is called to abort an all pending I/O request on a * device * * \param tiRoot: Pointer to initiator driver/port instance. * \param agRoot: Pointer to chip/driver Instance. * \param oneDeviceData: Pointer to the device * * \return: * * None * *****************************************************************************/ /* for abort itself, should we allocate tdAbortIORequestBody or get one from ttdsaXchg_t? Currently, we allocate tdAbortIORequestBody. */ osGLOBAL void ttdsaAbortAll( tiRoot_t *tiRoot, agsaRoot_t *agRoot, tdsaDeviceData_t *oneDeviceData ) { agsaIORequest_t *agAbortIORequest = agNULL; tdIORequestBody_t *tdAbortIORequestBody = agNULL; bit32 PhysUpper32; bit32 PhysLower32; bit32 memAllocStatus; void *osMemHandle; TI_DBG3(("tdsaAbortAll: start\n")); TI_DBG3(("tdsaAbortAll: did %d\n", oneDeviceData->id)); /* allocating agIORequest for abort itself */ memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&tdAbortIORequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdIORequestBody_t), agTRUE ); if (memAllocStatus != tiSuccess) { /* let os process IO */ TI_DBG1(("tdsaAbortAll: ostiAllocMemory failed...\n")); return; } if (tdAbortIORequestBody == agNULL) { /* let os process IO */ TI_DBG1(("tdsaAbortAll: ostiAllocMemory returned NULL tdAbortIORequestBody\n")); return; } /* setup task management structure */ tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle; /* setting callback */ /* not needed; it is already set to be ossaSSPAbortCB() */ tdAbortIORequestBody->IOCompletionFunc = ttdssIOAbortedHandler; tdAbortIORequestBody->tiDevHandle = (tiDeviceHandle_t *)&(oneDeviceData->tiDeviceHandle); /* initialize agIORequest */ 
agAbortIORequest = &(tdAbortIORequestBody->agIORequest);
  agAbortIORequest->osData = (void *) tdAbortIORequestBody;
  agAbortIORequest->sdkData = agNULL; /* LL takes care of this */

  /* SSPAbort */
  saSSPAbort(agRoot,
             agAbortIORequest,
             0,
             oneDeviceData->agDevHandle,
             1, /* abort all */
             agNULL,
             agNULL
             );
  return;
}
#endif /* TARGET_DRIVER */

/*
 * tdsaDeregisterDevicesInPort
 *
 * Walks the shared MainDeviceList and, for every device that belongs to
 * onePortContext, deregisters its handle from the LL layer via
 * saDeregisterDeviceHandle().  Directly attached SMP targets are the
 * exception: they are kept and explicitly marked registered.
 *
 * tiRoot:          OS-specific driver instance.
 * onePortContext:  Port whose devices are to be deregistered.
 *
 * NOTE(review): the list is traversed without taking a device-list lock
 * here — presumably callers hold it or run single-threaded; confirm.
 */
osGLOBAL void
tdsaDeregisterDevicesInPort(
                tiRoot_t             *tiRoot,
                tdsaPortContext_t    *onePortContext
               )
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  agsaRoot_t        *agRoot = agNULL;

  agRoot = &(tdsaAllShared->agRootNonInt);
  TI_DBG1(("tdsaDeregisterDevicesInPort: start\n"));

  /* find a device's existence */
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  while (DeviceListList != &(tdsaAllShared->MainDeviceList))
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData == agNULL)
    {
      TI_DBG1(("tdsaDeregisterDevicesInPort: oneDeviceData is NULL!!!\n"));
      return;
    }
    if (oneDeviceData->tdPortContext == onePortContext)
    {
      TI_DBG3(("tdsaDeregisterDevicesInPort: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id));
      /* keep directly attached SMP targets registered; deregister all others */
      if ( !( DEVICE_IS_SMP_TARGET(oneDeviceData) && oneDeviceData->directlyAttached == agTRUE))
      {
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, tdsaRotateQnumber(tiRoot, oneDeviceData));
      }
      else
      {
        TI_DBG1(("tdsaDeregisterDevicesInPort: keeping\n"));
        oneDeviceData->registered = agTRUE;
      }
    }
    DeviceListList = DeviceListList->flink;
  }

  TI_DBG3(("tdsaDeregisterDevicesInPort: end\n"));
  return;
}

/******************** for debugging only ***************************/
/* Debug helper: dumps selected agsaSwConfig_t fields at verbosity level 6. */
osGLOBAL void
tdsaPrintSwConfig(
                  agsaSwConfig_t *SwConfig
                  )
{
  if (SwConfig == agNULL)
  {
    TI_DBG6(("tdsaPrintSwConfig: SwConfig is NULL\n"));
    return;
  }
  else
  {
    TI_DBG6(("SwConfig->maxActiveIOs %d\n", SwConfig->maxActiveIOs));
    TI_DBG6(("SwConfig->smpReqTimeout %d\n",
SwConfig->smpReqTimeout));
  }
  return;
}

/* Debug helper: dumps selected agsaHwConfig_t fields at verbosity level 6. */
osGLOBAL void
tdsaPrintHwConfig(
                  agsaHwConfig_t *HwConfig
                  )
{
  if (HwConfig == agNULL)
  {
    TI_DBG6(("tdsaPrintHwConfig: HwConfig is NULL\n"));
    return;
  }
  else
  {
    TI_DBG6(("HwConfig->phyCount %d\n", HwConfig->phyCount));
  }
  return;
}

/* Debug helper: decodes an agsaSASIdentify_t (IDENTIFY address frame) and
   prints it field by field at verbosity level 6. */
osGLOBAL void
tdssPrintSASIdentify(
                     agsaSASIdentify_t *id
                     )
{
  if (id == agNULL)
  {
    /* NOTE(review): the message tag says "tdsaPrintSASIdentify" while the
       function is tdssPrintSASIdentify; left untouched (runtime string). */
    TI_DBG1(("tdsaPrintSASIdentify: ID is NULL\n"));
    return;
  }
  else
  {
    TI_DBG6(("SASID->sspTargetPort %d\n", SA_IDFRM_IS_SSP_TARGET(id)?1:0));
    TI_DBG6(("SASID->stpTargetPort %d\n", SA_IDFRM_IS_STP_TARGET(id)?1:0));
    TI_DBG6(("SASID->smpTargetPort %d\n", SA_IDFRM_IS_SMP_TARGET(id)?1:0));
    TI_DBG6(("SASID->sspInitiatorPort %d\n", SA_IDFRM_IS_SSP_INITIATOR(id)?1:0));
    TI_DBG6(("SASID->stpInitiatorPort %d\n", SA_IDFRM_IS_STP_INITIATOR(id)?1:0));
    TI_DBG6(("SASID->smpInitiatorPort %d\n", SA_IDFRM_IS_SMP_INITIATOR(id)?1:0));
    TI_DBG6(("SASID->deviceType %d\n", SA_IDFRM_GET_DEVICETTYPE(id)));
    TI_DBG6(("SASID->sasAddressHi 0x%x\n", SA_IDFRM_GET_SAS_ADDRESSHI(id)));
    TI_DBG6(("SASID->sasAddressLo 0x%x\n", SA_IDFRM_GET_SAS_ADDRESSLO(id)));
    TI_DBG6(("SASID->phyIdentifier 0x%x\n", id->phyIdentifier));
  }
  return;
}

/* Init-timer callback; currently a no-op that only logs. */
osGLOBAL void
tdsaInitTimerHandler(
                     tiRoot_t  *tiRoot,
                     void      *timerData
                     )
{
  TI_DBG6(("tdsaInitTimerHandler: start\n"));
  return;
}

/*
  Debug helper: walks a tdList_t forward (flink) and prints each element.
  type: 1 portcontext
        2 devicedata
  flag: 1 FreeLink
        2 MainLink
*/
osGLOBAL void
print_tdlist_flink(tdList_t *hdr, int type, int flag)
{
  tdList_t *hdr_tmp1 = NULL;
#ifdef TD_DEBUG_ENABLE
  tdsaPortContext_t *ele1;
#endif
#ifdef REMOVED
  tdsaDeviceData_t *ele2;
#endif
  hdr_tmp1 = hdr;

  if (type == 1 && flag == 1)
  {
    TI_DBG6(("PortContext and FreeLink\n"));
  }
  else if (type != 1 && flag == 1)
  {
    TI_DBG6(("DeviceData and FreeLink\n"));
  }
  else if (type == 1 && flag != 1)
  {
    TI_DBG6(("PortContext and MainLink\n"));
  }
  else
  {
    TI_DBG6(("DeviceData and MainLink\n"));
  }

  if (type == 1)
  {
    do
    {
      /* data structure type variable = (data structure type, file name, header of the tdList) */
      if (flag == 1)
      {
#ifdef TD_DEBUG_ENABLE
        ele1 = TDLIST_OBJECT_BASE(tdsaPortContext_t, FreeLink, hdr_tmp1);
#endif
      }
      else
      {
#ifdef TD_DEBUG_ENABLE
        ele1 = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, hdr_tmp1);
#endif
      }
      /* NOTE(review): ele1 is declared/assigned only under TD_DEBUG_ENABLE;
         the prints below presumably compile away when debug is disabled —
         confirm TI_DBG6 expands to nothing in that configuration. */
      TI_DBG6(("flist ele %d\n", ele1->id));
      TI_DBG6(("flist ele %p\n", ele1));
      hdr_tmp1 = hdr_tmp1->flink;
    } while (hdr_tmp1 != hdr);
  }
  else
  {
    do
    {
      /* data structure type variable = (data structure type, file name, header of the tdList) */
#ifdef REMOVED
      if (flag == 1)
      {
        ele2 = TDLIST_OBJECT_BASE(tdsaDeviceData_t, FreeLink, hdr_tmp1);
      }
      else
      {
        ele2 = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, hdr_tmp1);
      }
      TI_DBG6(("flist ele %d\n", ele2->id));
      TI_DBG6(("flist ele %p\n", ele2));
#endif
      hdr_tmp1 = hdr_tmp1->flink;
    } while (hdr_tmp1 != hdr);
  }
  TI_DBG6(("\n"));
}

/* not verified yet. 6/15/2005 */
/* Debug helper: walks a tdList_t backward (blink); the element printing is
   compiled out (#ifdef REMOVED). */
osGLOBAL void
print_tdlist_blink(tdList_t *hdr, int flag)
{
  tdList_t *hdr_tmp1 = NULL;
#ifdef REMOVED
  tdsaPortContext_t *ele1;
#endif
  hdr_tmp1 = hdr;

  do
  {
    /* data structure type variable = (data structure type, file name, header of the tdList) */
#ifdef REMOVED
    if (flag == 1)
    {
      ele1 = TDLIST_OBJECT_BASE(tdsaPortContext_t, FreeLink, hdr_tmp1);
    }
    else
    {
      ele1 = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, hdr_tmp1);
    }
    TI_DBG6(("blist ele %d\n", ele1->id));
#endif
    hdr_tmp1 = hdr_tmp1->blink;
  } while (hdr_tmp1 != hdr);
}

/** hexadecimal dump */
void
tdhexdump(const char *ptitle, bit8 *pbuf, int len)
{
  int i;
  TI_DBG2(("%s - hexdump(len=%d):\n", ptitle, (int)len));
  if (!pbuf)
  {
    TI_DBG1(("pbuf is NULL\n"));
    return;
  }
  /* four bytes per output line while five or more remain, then one per line */
  for (i = 0; i < len; )
  {
    if (len - i > 4)
    {
      TI_DBG2((" 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n", pbuf[i], pbuf[i+1], pbuf[i+2], pbuf[i+3]));
      i += 4;
    }
    else
    {
      TI_DBG2((" 0x%02x,", pbuf[i]));
      i++;
    }
  }
  TI_DBG2(("\n"));
}

/* Takes a TD-layer queue lock: queue lock ids live past the LL and OS lock
   ranges, so the id is offset before calling the OS layer. */
void
tdsaSingleThreadedEnter(tiRoot_t *ptiRoot, bit32 queueId)
{
  tdsaRoot_t * tiroot = agNULL;
  bit32 offset = 0;
  TD_ASSERT(ptiRoot,"ptiRoot");
  tiroot = ptiRoot->tdData;

  /* TD queue locks are placed after the LL and OS lock ranges */
  offset = tiroot->tdsaAllShared.MaxNumLLLocks + tiroot->tdsaAllShared.MaxNumOSLocks;

  ostiSingleThreadedEnter(ptiRoot, queueId
+ offset); } void tdsaSingleThreadedLeave(tiRoot_t *ptiRoot, bit32 queueId) { tdsaRoot_t * tiroot = agNULL; bit32 offset = 0; TD_ASSERT(ptiRoot,"ptiRoot"); tiroot = ptiRoot->tdData; offset = tiroot->tdsaAllShared.MaxNumLLLocks + tiroot->tdsaAllShared.MaxNumOSLocks; ostiSingleThreadedLeave(ptiRoot, queueId + offset); } #ifdef PERF_COUNT void tdsaEnter(tiRoot_t *ptiRoot, int io) { ostiEnter(ptiRoot, 1, io); } void tdsaLeave(tiRoot_t *ptiRoot, int io) { ostiLeave(ptiRoot, 1, io); } #endif Index: head/sys/dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c =================================================================== --- head/sys/dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c (revision 359440) +++ head/sys/dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c (revision 359441) @@ -1,8547 +1,8547 @@ /******************************************************************************* *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED *WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ********************************************************************************/ /*******************************************************************************/ /** \file * * This file contains initiator discover related functions * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef FDS_SM #include #include #include #endif #ifdef FDS_DM #include #include #include #endif #include #include #include #ifdef INITIATOR_DRIVER #include #include #include #endif #ifdef TARGET_DRIVER #include #include #include #endif #include #include /***************************************************************************** *! \brief tiINIDiscoverTargets * * Purpose: This function is called to send a transport dependent discovery * request. An implicit login will be started following the * completion of discovery. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param portalContext: Pointer to the portal context instance. * \param option: This is a bit field option on how the session is to be * created * \return: * tiSuccess Discovery initiated. * tiBusy Discovery could not be initiated at this time. 
* * \note: * *****************************************************************************/ osGLOBAL bit32 tiINIDiscoverTargets( tiRoot_t *tiRoot, tiPortalContext_t *portalContext, bit32 option ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdList_t *PortContextList; tdsaPortContext_t *onePortContext = agNULL; bit32 found = agFALSE; #ifdef FDS_DM dmRoot_t *dmRoot = &(tdsaAllShared->dmRoot); dmPortContext_t *dmPortContext = agNULL; #endif /* this function is called after LINK_UP by ossaHWCB() Therefore, tdsaportcontext is ready at this point */ TI_DBG3(("tiINIDiscoverTargets: start\n")); /* find a right tdsaPortContext using tiPortalContext then, check the status of tdsaPortContext then, if status is right, start the discovery */ TI_DBG6(("tiINIDiscoverTargets: portalContext %p\n", portalContext)); tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); if (TDLIST_EMPTY(&(tdsaAllShared->MainPortContextList))) { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); TI_DBG1(("tiINIDiscoverTargets: No tdsaPortContext\n")); return tiError; } else { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } /* find a right portcontext */ PortContextList = tdsaAllShared->MainPortContextList.flink; if (PortContextList == agNULL) { TI_DBG1(("tiINIDiscoverTargets: PortContextList is NULL\n")); return tiError; } while (PortContextList != &(tdsaAllShared->MainPortContextList)) { onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, PortContextList); if (onePortContext == agNULL) { TI_DBG1(("tiINIDiscoverTargets: onePortContext is NULL, PortContextList = %p\n", PortContextList)); return tiError; } if (onePortContext->tiPortalContext == portalContext && onePortContext->valid == agTRUE) { TI_DBG6(("tiINIDiscoverTargets: found; oneportContext ID %d\n", onePortContext->id)); found = agTRUE; break; } PortContextList = PortContextList->flink; } if (found == agFALSE) { TI_DBG1(("tiINIDiscoverTargets: No 
corresponding tdsaPortContext\n")); return tiError; } TI_DBG2(("tiINIDiscoverTargets: pid %d\n", onePortContext->id)); if (onePortContext->DiscoveryState == ITD_DSTATE_NOT_STARTED) { TI_DBG6(("tiINIDiscoverTargets: calling Discovery\n")); /* start SAS discovery */ #ifdef FDS_DM if (onePortContext->UseDM == agTRUE) { TI_DBG1(("tiINIDiscoverTargets: calling dmDiscover, pid %d\n", onePortContext->id)); onePortContext->DiscoveryState = ITD_DSTATE_STARTED; dmPortContext = &(onePortContext->dmPortContext); dmDiscover(dmRoot, dmPortContext, DM_DISCOVERY_OPTION_FULL_START); } else { /* complete discovery */ onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED; ostiInitiatorEvent( tiRoot, portalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscOK, agNULL ); return tiSuccess; } #else #ifdef TD_DISCOVER tdsaDiscover( tiRoot, onePortContext, AG_SA_DISCOVERY_TYPE_SAS, TDSA_DISCOVERY_OPTION_FULL_START ); #else saDiscover(onePortContext->agRoot, onePortContext->agPortContext, AG_SA_DISCOVERY_TYPE_SAS, onePortContext->discoveryOptions); #endif #endif /* FDS_DM */ } else { TI_DBG1(("tiINIDiscoverTargets: Discovery has started or incorrect initialization; state %d pid 0x%x\n", onePortContext->DiscoveryState, onePortContext->id)); return tiError; } return tiSuccess; } /***************************************************************************** *! \brief tiINIGetDeviceHandles * * Purpose: This routine is called to to return the device handles for each * device currently available. * * \param tiRoot: Pointer to driver Instance. * \param tiPortalContext: Pointer to the portal context instance. * \param agDev[]: Array to receive pointers to the device handles. * \param maxDevs: Number of device handles which will fit in array pointed * by agDev. * \return: * Number of device handle slots present (however, only maxDevs * are copied into tiDev[]) which may be greater than the number of * handles actually present. In short, returns the number of devices that * were found. 
* * \note: * *****************************************************************************/ osGLOBAL bit32 tiINIGetDeviceHandles( tiRoot_t * tiRoot, tiPortalContext_t * tiPortalContext, tiDeviceHandle_t * tiDev[], bit32 maxDevs ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdList_t *PortContextList; tdsaPortContext_t *onePortContext = agNULL; tdsaDeviceData_t *oneDeviceData = agNULL; tdList_t *DeviceListList; bit32 i; bit32 FoundDevices = 0; bit32 DeviceIndex = 0; bit32 found = agFALSE; #ifdef TD_DEBUG_ENABLE satDeviceData_t *pSatDevData; #endif #ifdef FDS_DM dmRoot_t *dmRoot = &(tdsaAllShared->dmRoot); #endif TI_DBG2(("tiINIGetDeviceHandles: start\n")); TI_DBG2(("tiINIGetDeviceHandles: tiPortalContext %p\n", tiPortalContext)); if (maxDevs == 0) { TI_DBG1(("tiINIGetDeviceHandles: maxDevs is 0\n")); TI_DBG1(("tiINIGetDeviceHandles: first, returning 0\n")); /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } return 0; } tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK); if (TDLIST_EMPTY(&(tdsaAllShared->MainPortContextList))) { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); TI_DBG1(("tiINIGetDeviceHandles: No available tdsaPortContext\n")); TI_DBG1(("tiINIGetDeviceHandles: second, returning 0\n")); /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } return 0; } else { tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK); } /* find a corresponding portcontext */ PortContextList = tdsaAllShared->MainPortContextList.flink; while (PortContextList != &(tdsaAllShared->MainPortContextList)) { onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, PortContextList); if(onePortContext == agNULL) continue; TI_DBG3(("tiINIGetDeviceHandles: oneportContext pid %d\n", onePortContext->id)); if (onePortContext->tiPortalContext == tiPortalContext && onePortContext->valid == agTRUE) { TI_DBG3(("tiINIGetDeviceHandles: found; 
oneportContext pid %d\n", onePortContext->id)); found = agTRUE; break; } PortContextList = PortContextList->flink; } if (found == agFALSE) { TI_DBG1(("tiINIGetDeviceHandles: First, No corresponding tdsaPortContext\n")); TI_DBG1(("tiINIGetDeviceHandles: third, returning 0\n")); /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } return 0; } if (onePortContext == agNULL) { TI_DBG1(("tiINIGetDeviceHandles: Second, No corressponding tdsaPortContext\n")); TI_DBG1(("tiINIGetDeviceHandles: fourth, returning 0\n")); /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } return 0; } if (onePortContext->valid == agFALSE) { TI_DBG1(("tiINIGetDeviceHandles: Third, tdsaPortContext is invalid, pid %d\n", onePortContext->id)); TI_DBG1(("tiINIGetDeviceHandles: fifth, returning 0\n")); /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } return 0; } if (onePortContext->DiscoveryState == ITD_DSTATE_COMPLETED && onePortContext->DMDiscoveryState == dmDiscFailed) { TI_DBG1(("tiINIGetDeviceHandles: forth, discovery failed, pid %d\n", onePortContext->id)); TI_DBG1(("tiINIGetDeviceHandles: sixth, returning 0\n")); /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } return 0; } if (onePortContext->DiscoveryState != ITD_DSTATE_COMPLETED) { TI_DBG1(("tiINIGetDeviceHandles: discovery not completed\n")); TI_DBG1(("tiINIGetDeviceHandles: sixth, returning DISCOVERY_IN_PROGRESS, pid %d\n", onePortContext->id)); onePortContext->discovery.forcedOK = agTRUE; return DISCOVERY_IN_PROGRESS; } TI_DBG2(("tiINIGetDeviceHandles: pid %d\n", onePortContext->id)); #ifdef FDS_DM tdsaUpdateMCN(dmRoot, onePortContext); #endif /* nullify all device handles */ for (i = 0 ; i < maxDevs ; i++) { tiDev[i] = agNULL; } /* From the device list, returns only valid devices */ DeviceListList = tdsaAllShared->MainDeviceList.flink; TD_ASSERT(DeviceListList, "DeviceListList NULL"); if 
(DeviceListList == agNULL ) { TI_DBG1(("tiINIGetDeviceHandles: DeviceListList == agNULL\n")); TI_DBG1(("tiINIGetDeviceHandles: seventh, returning not found, pid %d\n", onePortContext->id)); return 0; } while ((DeviceIndex < maxDevs) && DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); #ifdef TD_DEBUG_ENABLE pSatDevData = (satDeviceData_t *)&(oneDeviceData->satDevData); if (pSatDevData != agNULL) { TI_DBG3(("tiINIGetDeviceHandles: device %p satPendingIO %d satNCQMaxIO %d\n",pSatDevData, pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO )); TI_DBG3(("tiINIGetDeviceHandles: device %p satPendingNCQIO %d satPendingNONNCQIO %d\n",pSatDevData, pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO)); } #endif TI_DBG3(("tiINIGetDeviceHandles: pid %d did %d\n", onePortContext->id, oneDeviceData->id)); TI_DBG3(("tiINIGetDeviceHandles: device AddrHi 0x%08x AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG6(("tiINIGetDeviceHandles: handle %p\n", &(oneDeviceData->tiDeviceHandle))); if (oneDeviceData->tdPortContext != onePortContext) { TI_DBG3(("tiINIGetDeviceHandles: different port\n")); DeviceListList = DeviceListList->flink; } else { #ifdef SATA_ENABLE if ((oneDeviceData->valid == agTRUE) && (oneDeviceData->registered == agTRUE) && (oneDeviceData->tdPortContext == onePortContext) && ( DEVICE_IS_SSP_TARGET(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData) || DEVICE_IS_SATA_DEVICE(oneDeviceData) ) ) #else if ((oneDeviceData->valid == agTRUE) && (oneDeviceData->registered == agTRUE) && (oneDeviceData->tdPortContext == onePortContext) && ( DEVICE_IS_SSP_TARGET(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData) ) ) #endif { if (DEVICE_IS_SSP_TARGET(oneDeviceData)) { TI_DBG2(("tiINIGetDeviceHandles: SSP DeviceIndex %d tiDeviceHandle %p\n", DeviceIndex, &(oneDeviceData->tiDeviceHandle))); tiDev[DeviceIndex] = 
&(oneDeviceData->tiDeviceHandle); FoundDevices++; } else if ( (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)) && oneDeviceData->satDevData.IDDeviceValid == agTRUE ) { TI_DBG2(("tiINIGetDeviceHandles: SATA DeviceIndex %d tiDeviceHandle %p\n", DeviceIndex, &(oneDeviceData->tiDeviceHandle))); tiDev[DeviceIndex] = &(oneDeviceData->tiDeviceHandle); FoundDevices++; } else { TI_DBG3(("tiINIGetDeviceHandles: skip case !!!\n")); TI_DBG3(("tiINIGetDeviceHandles: valid %d SSP target %d STP target %d SATA device %d\n", oneDeviceData->valid, DEVICE_IS_SSP_TARGET(oneDeviceData), DEVICE_IS_STP_TARGET(oneDeviceData), DEVICE_IS_SATA_DEVICE(oneDeviceData))); TI_DBG3(("tiINIGetDeviceHandles: oneDeviceData->satDevData.IDDeviceValid %d\n", oneDeviceData->satDevData.IDDeviceValid)); TI_DBG3(("tiINIGetDeviceHandles: registered %d right port %d \n", oneDeviceData->registered, (oneDeviceData->tdPortContext == onePortContext))); TI_DBG3(("tiINIGetDeviceHandles: oneDeviceData->tdPortContext %p onePortContext %p\n", oneDeviceData->tdPortContext, onePortContext)); } TI_DBG3(("tiINIGetDeviceHandles: valid FoundDevices %d\n", FoundDevices)); TI_DBG3(("tiINIGetDeviceHandles: agDevHandle %p\n", oneDeviceData->agDevHandle)); } else { TI_DBG3(("tiINIGetDeviceHandles: valid %d SSP target %d STP target %d SATA device %d\n", oneDeviceData->valid, DEVICE_IS_SSP_TARGET(oneDeviceData), DEVICE_IS_STP_TARGET(oneDeviceData), DEVICE_IS_SATA_DEVICE(oneDeviceData))); TI_DBG3(("tiINIGetDeviceHandles: registered %d right port %d \n", oneDeviceData->registered, (oneDeviceData->tdPortContext == onePortContext))); TI_DBG3(("tiINIGetDeviceHandles: oneDeviceData->tdPortContext %p onePortContext %p\n", oneDeviceData->tdPortContext, onePortContext)); } DeviceIndex++; DeviceListList = DeviceListList->flink; } /* else */ } if (DeviceIndex > maxDevs) { TI_DBG1(("tiINIGetDeviceHandles: DeviceIndex(%d) >= maxDevs(%d)\n", DeviceIndex, maxDevs)); FoundDevices = maxDevs; } 
/* Tail of tiINIGetDeviceHandles(); the function begins before this chunk. */
TI_DBG1(("tiINIGetDeviceHandles: returning %d found devices, pid %d\n", FoundDevices, onePortContext->id));
return FoundDevices;
}

/*****************************************************************************
*! \brief  tiINIGetDeviceHandlesForWinIOCTL
*
*  Purpose: This routine is called to return the device handles for each
*           device currently available; this routine is only for Win IOCTL to
*           display SAS topology.
*
*  \param  tiRoot:          Pointer to driver Instance.
*  \param  tiPortalContext: Pointer to the portal context instance.
*  \param  agDev[]:         Array to receive pointers to the device handles.
*  \param  maxDevs:         Number of device handles which will fit in array
*                           pointed by agDev.
*  \return:
*    Number of device handle slots present (however, only maxDevs
*    are copied into tiDev[]) which may be greater than the number of
*    handles actually present. In short, returns the number of devices that
*    were found. May also return DISCOVERY_IN_PROGRESS if discovery has not
*    completed on the matching port context.
*
*  \note:
*
*****************************************************************************/
osGLOBAL bit32
tiINIGetDeviceHandlesForWinIOCTL(
  tiRoot_t          * tiRoot,
  tiPortalContext_t * tiPortalContext,
  tiDeviceHandle_t  * tiDev[],
  bit32             maxDevs
  )
{
  tdsaRoot_t        *tdsaRoot      = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdList_t          *PortContextList;
  tdsaPortContext_t *onePortContext = agNULL;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  bit32             i;
  bit32             FoundDevices = 0;
  bit32             DeviceIndex = 0;
  bit32             found = agFALSE;
#ifdef TD_DEBUG_ENABLE
  satDeviceData_t   *pSatDevData;
#endif
#ifdef FDS_DM
  dmRoot_t          *dmRoot = &(tdsaAllShared->dmRoot);
#endif

  TI_DBG2(("tiINIGetDeviceHandlesForWinIOCTL: start\n"));
  TI_DBG2(("tiINIGetDeviceHandlesForWinIOCTL: tiPortalContext %p\n", tiPortalContext));

  if (maxDevs == 0)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: maxDevs is 0\n"));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: first, returning 0\n"));
    /* nullify all device handles */
    /* NOTE(review): with maxDevs == 0 this loop body never runs; kept for
       symmetry with the sibling exit paths. */
    for (i = 0 ; i < maxDevs ; i++)
    {
      tiDev[i] = agNULL;
    }
    return 0;
  }

  /* Lock protects only the emptiness test; the walk below runs unlocked,
     matching the style used throughout this file. */
  tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK);
  if (TDLIST_EMPTY(&(tdsaAllShared->MainPortContextList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: No available tdsaPortContext\n"));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: second, returning 0\n"));
    /* nullify all device handles */
    for (i = 0 ; i < maxDevs ; i++)
    {
      tiDev[i] = agNULL;
    }
    return 0;
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
  }

  /* find a corresponding portcontext */
  PortContextList = tdsaAllShared->MainPortContextList.flink;
  while (PortContextList != &(tdsaAllShared->MainPortContextList))
  {
    onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, PortContextList);
    /* NOTE(review): TDLIST_OBJECT_BASE is pointer arithmetic and should never
       yield agNULL; if it ever did, this `continue` would loop forever because
       PortContextList is not advanced first — verify the macro's contract. */
    if(onePortContext == agNULL) continue;
    TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: oneportContext pid %d\n", onePortContext->id));
    if (onePortContext->tiPortalContext == tiPortalContext && onePortContext->valid == agTRUE)
    {
      TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: found; oneportContext pid %d\n", onePortContext->id));
      found = agTRUE;
      break;
    }
    PortContextList = PortContextList->flink;
  }

  if (found == agFALSE)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: First, No corresponding tdsaPortContext\n"));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: third, returning 0\n"));
    /* nullify all device handles */
    for (i = 0 ; i < maxDevs ; i++)
    {
      tiDev[i] = agNULL;
    }
    return 0;
  }

  if (onePortContext == agNULL)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: Second, No corressponding tdsaPortContext\n"));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: fourth, returning 0\n"));
    /* nullify all device handles */
    for (i = 0 ; i < maxDevs ; i++)
    {
      tiDev[i] = agNULL;
    }
    return 0;
  }

  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: Third, tdsaPortContext is invalid, pid %d\n", onePortContext->id));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: fifth, returning 0\n"));
    /* nullify all device handles */
    for (i = 0 ; i < maxDevs ; i++)
    {
      tiDev[i] = agNULL;
    }
    return 0;
  }

  if (onePortContext->DiscoveryState == ITD_DSTATE_COMPLETED && onePortContext->DMDiscoveryState == dmDiscFailed)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: forth, discovery failed, pid %d\n", onePortContext->id));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: sixth, returning 0\n"));
    /* nullify all device handles */
    for (i = 0 ; i < maxDevs ; i++)
    {
      tiDev[i] = agNULL;
    }
    return 0;
  }

  if (onePortContext->DiscoveryState != ITD_DSTATE_COMPLETED)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: discovery not completed\n"));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: sixth, returning DISCOVERY_IN_PROGRESS, pid %d\n", onePortContext->id));
    onePortContext->discovery.forcedOK = agTRUE;
    return DISCOVERY_IN_PROGRESS;
  }

  TI_DBG2(("tiINIGetDeviceHandlesForWinIOCTL: pid %d\n", onePortContext->id));

#ifdef FDS_DM
  tdsaUpdateMCN(dmRoot, onePortContext);
#endif

  /* nullify all device handles */
  for (i = 0 ; i < maxDevs ; i++)
  {
    tiDev[i] = agNULL;
  }

  /* From the device list, returns only valid devices */
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  TD_ASSERT(DeviceListList, "DeviceListList NULL");
  if (DeviceListList == agNULL )
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: DeviceListList == agNULL\n"));
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: seventh, returning not found, pid %d\n", onePortContext->id));
    return 0;
  }

  while ((DeviceIndex < maxDevs) && DeviceListList != &(tdsaAllShared->MainDeviceList))
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    if(oneDeviceData == agNULL)
    {
      TI_DBG3(("tiINIGetDeviceHandles: OneDeviceData is NULL\n"));
      return 0;
    }
#ifdef TD_DEBUG_ENABLE
    pSatDevData = (satDeviceData_t *)&(oneDeviceData->satDevData);
    if (pSatDevData != agNULL)
    {
      TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: device %p satPendingIO %d satNCQMaxIO %d\n",pSatDevData, pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO ));
      TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: device %p satPendingNCQIO %d satPendingNONNCQIO %d\n",pSatDevData, pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO));
    }
#endif
    TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: pid %d did %d\n", onePortContext->id, oneDeviceData->id));
    TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: device AddrHi 0x%08x AddrLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo));
    TI_DBG6(("tiINIGetDeviceHandlesForWinIOCTL: handle %p\n", &(oneDeviceData->tiDeviceHandle)));
    if (oneDeviceData->tdPortContext != onePortContext)
    {
      TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: different port\n"));
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      /* SATA_ENABLE additionally admits SATA and SMP devices. */
#ifdef SATA_ENABLE
      if ((oneDeviceData->valid == agTRUE) &&
          (oneDeviceData->registered == agTRUE) &&
          (oneDeviceData->tdPortContext == onePortContext) &&
          ( DEVICE_IS_SSP_TARGET(oneDeviceData) ||
            DEVICE_IS_STP_TARGET(oneDeviceData) ||
            DEVICE_IS_SATA_DEVICE(oneDeviceData) ||
            DEVICE_IS_SMP_TARGET(oneDeviceData))
         )
#else
      if ((oneDeviceData->valid == agTRUE) &&
          (oneDeviceData->registered == agTRUE) &&
          (oneDeviceData->tdPortContext == onePortContext) &&
          ( DEVICE_IS_SSP_TARGET(oneDeviceData) ||
            DEVICE_IS_STP_TARGET(oneDeviceData))
         )
#endif
      {
        if (DEVICE_IS_SSP_TARGET(oneDeviceData))
        {
          TI_DBG2(("tiINIGetDeviceHandlesForWinIOCTL: SSP DeviceIndex %d tiDeviceHandle %p\n", DeviceIndex, &(oneDeviceData->tiDeviceHandle)));
          tiDev[DeviceIndex] = &(oneDeviceData->tiDeviceHandle);
          DeviceIndex++;
          FoundDevices++;
        }
        /* SATA/STP targets are reported only once identify-device data is in. */
        else if ( (DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData))
                  && oneDeviceData->satDevData.IDDeviceValid == agTRUE )
        {
          TI_DBG2(("tiINIGetDeviceHandlesForWinIOCTL: SATA DeviceIndex %d tiDeviceHandle %p\n", DeviceIndex, &(oneDeviceData->tiDeviceHandle)));
          tiDev[DeviceIndex] = &(oneDeviceData->tiDeviceHandle);
          DeviceIndex++;
          FoundDevices++;
        }
        else if (DEVICE_IS_SMP_TARGET(oneDeviceData))
        {
          TI_DBG2(("tiINIGetDeviceHandlesForWinIOCTL: SMP DeviceIndex %d tiDeviceHandle %p\n", DeviceIndex, &(oneDeviceData->tiDeviceHandle)));
          tiDev[DeviceIndex] = &(oneDeviceData->tiDeviceHandle);
          DeviceIndex++;
          FoundDevices++;
        }
        else
        {
          TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: skip case !!!\n"));
          TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: valid %d SSP target %d STP target %d SATA device %d\n", oneDeviceData->valid, DEVICE_IS_SSP_TARGET(oneDeviceData), DEVICE_IS_STP_TARGET(oneDeviceData), DEVICE_IS_SATA_DEVICE(oneDeviceData)));
          TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: oneDeviceData->satDevData.IDDeviceValid %d\n", oneDeviceData->satDevData.IDDeviceValid));
          TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: registered %d right port %d \n", oneDeviceData->registered, (oneDeviceData->tdPortContext == onePortContext)));
          TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: oneDeviceData->tdPortContext %p onePortContext %p\n", oneDeviceData->tdPortContext, onePortContext));
        }
        TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: valid FoundDevices %d\n", FoundDevices));
        TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: agDevHandle %p\n", oneDeviceData->agDevHandle));
      }
      else
      {
        TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: valid %d SSP target %d STP target %d SATA device %d\n", oneDeviceData->valid, DEVICE_IS_SSP_TARGET(oneDeviceData), DEVICE_IS_STP_TARGET(oneDeviceData), DEVICE_IS_SATA_DEVICE(oneDeviceData)));
        TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: registered %d right port %d \n", oneDeviceData->registered, (oneDeviceData->tdPortContext == onePortContext)));
        TI_DBG3(("tiINIGetDeviceHandlesForWinIOCTL: oneDeviceData->tdPortContext %p onePortContext %p\n", oneDeviceData->tdPortContext, onePortContext));
      }
      //DeviceIndex++;
      DeviceListList = DeviceListList->flink;
    } /* else */
  }

  /* NOTE(review): the loop guard keeps DeviceIndex <= maxDevs, so this
     `>` test can never be true (the log text even says ">="); the clamp
     looks dead — confirm against the sibling tiINIGetDeviceHandles(). */
  if (DeviceIndex > maxDevs)
  {
    TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: DeviceIndex(%d) >= maxDevs(%d)\n", DeviceIndex, maxDevs));
    FoundDevices = maxDevs;
  }

  TI_DBG1(("tiINIGetDeviceHandlesForWinIOCTL: returning %d found devices, pid %d\n", FoundDevices, onePortContext->id));
  return FoundDevices;
}

/*****************************************************************************
*!
\brief  tiINIGetDeviceInfo
*
*  Purpose: This routine is called by the OS Layer to find out
*           the name associated with the device and where
*           it is mapped (address1 and address2).
*
*  \param  tiRoot:          Pointer to driver Instance.
*  \param  tiDeviceHandle:  device handle associated with the device
*  \param  tiDeviceInfo:    pointer to structure where the information
*                           needs to be copied.
*  \return:
*          tiSuccess - successful
*          tiInvalidHandle - device handle passed is not a valid handle.
*
*  \note:
*
*****************************************************************************/
osGLOBAL bit32
tiINIGetDeviceInfo(
  tiRoot_t *tiRoot,
  tiDeviceHandle_t *tiDeviceHandle,
  tiDeviceInfo_t *tiDeviceInfo)
{
  tdsaDeviceData_t *oneDeviceData = agNULL;
  satDeviceData_t  *pSatDevData = agNULL;
  bit8             id_limit[5];       /* SATA signature bytes + NCQ depth */
  bit8             SN_id_limit[25];   /* serial number (20) + id_limit (5) */
  agsaRoot_t       *agRoot = agNULL;

  TI_DBG6(("tiINIGetDeviceInfo: start \n"));

  if (tiDeviceHandle == agNULL)
  {
    TI_DBG6(("tiINIGetDeviceInfo: tiDeviceHandle NULL\n"));
    return tiInvalidHandle;
  }

  if (tiDeviceHandle->tdData == agNULL)
  {
    TI_DBG6(("tiINIGetDeviceInfo: ^^^^^^^^^ tiDeviceHandle->tdData NULL\n"));
    return tiInvalidHandle;
  }
  else
  {
    oneDeviceData = (tdsaDeviceData_t *)(tiDeviceHandle->tdData);
    agRoot = oneDeviceData->agRoot;
    TI_DBG6(("tiINIGetDeviceInfo: ^^^^^^^^^ tiDeviceHandle->tdData NOT NULL\n"));
  }

  if (oneDeviceData == agNULL)
  {
    TI_DBG6(("tiINIGetDeviceInfo: ^^^^^^^^^ oneDeviceData NULL\n"));
    return tiInvalidHandle;
  }

  /* filling in the link rate */
  if (oneDeviceData->registered == agTRUE)
  {
    tiDeviceInfo->info.devType_S_Rate = oneDeviceData->agDeviceInfo.devType_S_Rate;
  }
  else
  {
    /* unregistered device: mask off the rate nibble, keep device type */
    tiDeviceInfo->info.devType_S_Rate = (bit8)(oneDeviceData->agDeviceInfo.devType_S_Rate & 0x0f);
  }

  /* just returning local and remote SAS address; doesn't have a name for SATA device, returns identify device data */
  if (DEVICE_IS_SATA_DEVICE(oneDeviceData) && (oneDeviceData->directlyAttached == agTRUE))
  {
    osti_memset(&id_limit, 0, sizeof(id_limit));
    osti_memset(&SN_id_limit, 0, sizeof(SN_id_limit));

    /* SATA signature 0xABCD */
    id_limit[0] = 0xA;
    id_limit[1] = 0xB;
    id_limit[2] = 0xC;
    id_limit[3] = 0xD;

    pSatDevData = &(oneDeviceData->satDevData);
    if (pSatDevData->satNCQ == agTRUE)
    {
      id_limit[4] = (bit8)pSatDevData->satNCQMaxIO;
    }
    else
    {
      /* no NCQ */
      id_limit[4] = 1;
    }

    /* Build SN_id_limit = identify-device serial number (20 bytes)
       followed by the 5 signature/queue-depth bytes, then cache it on
       the device so remoteName can point at stable storage. */
    osti_memcpy(&SN_id_limit, &(oneDeviceData->satDevData.satIdentifyData.serialNumber), 20);
    osti_memcpy(&(SN_id_limit[20]), &id_limit, 5);
    osti_memcpy(oneDeviceData->satDevData.SN_id_limit, SN_id_limit, 25);

    /* serialNumber, 20 bytes + ABCD + NCQ LENGTH ; modelNumber, 40 bytes */
    //  tiDeviceInfo->remoteName = (char *)&(oneDeviceData->satDevData.satIdentifyData.serialNumber);
    tiDeviceInfo->remoteName = (char *)oneDeviceData->satDevData.SN_id_limit;
    tiDeviceInfo->remoteAddress = (char *)&(oneDeviceData->satDevData.satIdentifyData.modelNumber);
    //  TI_DBG1(("tiINIGetDeviceInfo: SATA device remote hi 0x%08x lo 0x%08x\n", oneDeviceData->tdPortContext->sasRemoteAddressHi, oneDeviceData->tdPortContext->sasRemoteAddressLo));
    //  tdhexdump("tiINIGetDeviceInfo remotename", (bit8 *)&(oneDeviceData->satDevData.satIdentifyData.serialNumber), 20);
    //  tdhexdump("tiINIGetDeviceInfo new name", (bit8 *)&(SN_id_limit), sizeof(SN_id_limit));
    //  tdhexdump("tiINIGetDeviceInfo remoteaddress", (bit8 *)&(oneDeviceData->satDevData.satIdentifyData.modelNumber),40);
    tiDeviceInfo->osAddress1 = 25;
    tiDeviceInfo->osAddress2 = 40;
  }
  else if (DEVICE_IS_STP_TARGET(oneDeviceData))
  {
    /* serialNumber, 20 bytes; modelNumber, 40 bytes */
    tiDeviceInfo->remoteName = (char *)&(oneDeviceData->satDevData.satIdentifyData.serialNumber);
    tiDeviceInfo->remoteAddress = (char *)&(oneDeviceData->satDevData.satIdentifyData.modelNumber);
    //  TI_DBG1(("tiINIGetDeviceInfo: SATA device remote hi 0x%08x lo 0x%08x\n", oneDeviceData->tdPortContext->sasRemoteAddressHi, oneDeviceData->tdPortContext->sasRemoteAddressLo));
    //  tdhexdump("tiINIGetDeviceInfo remotename", (bit8 *)&(oneDeviceData->satDevData.satIdentifyData.serialNumber), 20);
    //  tdhexdump("tiINIGetDeviceInfo remoteaddress", (bit8 *)&(oneDeviceData->satDevData.satIdentifyData.modelNumber),40);
    tiDeviceInfo->osAddress1 = 20;
    tiDeviceInfo->osAddress2 = 40;
  }
  else
  {
    /* plain SAS device: name/address are the raw 4-byte SAS address halves */
    tiDeviceInfo->remoteName = (char *)&(oneDeviceData->SASAddressID.sasAddressHi);
    tiDeviceInfo->remoteAddress = (char *)&(oneDeviceData->SASAddressID.sasAddressLo);
    TI_DBG1(("tiINIGetDeviceInfo: SAS device remote hi 0x%08x lo 0x%08x\n", oneDeviceData->tdPortContext->sasRemoteAddressHi, oneDeviceData->tdPortContext->sasRemoteAddressLo));
    tiDeviceInfo->osAddress1 = 4;
    tiDeviceInfo->osAddress2 = 4;
  }

  tiDeviceInfo->localName = (char *)&(oneDeviceData->tdPortContext->sasLocalAddressHi);
  tiDeviceInfo->localAddress = (char *)&(oneDeviceData->tdPortContext->sasLocalAddressLo);
  TI_DBG6(("tiINIGetDeviceInfo: local hi 0x%08x lo 0x%08x\n", oneDeviceData->tdPortContext->sasLocalAddressHi, oneDeviceData->tdPortContext->sasLocalAddressLo));

  if (oneDeviceData->agDevHandle == agNULL)
  {
    TI_DBG1(("tiINIGetDeviceInfo: Error! oneDeviceData->agDevHandle is NULL"));
    return tiError;
  }
  else
  {
    saGetDeviceInfo(agRoot, agNULL, 0, 0,oneDeviceData->agDevHandle);
  }

  return tiSuccess;
}

/*****************************************************************************
*! \brief  tiINILogin
*
*  Purpose: This function is called to request that the Transport Dependent
*           Layer initiates login for a specific target.
*
*  \param tiRoot:          Pointer to driver Instance.
*  \param tiDeviceHandle:  Pointer to a target device handle discovered
*                          following the discovery.
*
*  \return:
*         tiSuccess       Login initiated.
*         tiError         Login failed.
*         tiBusy          Login can not be initiated at this time.
*         tiNotSupported  This API is currently not supported by this
*                         Transport Layer
*
*
*****************************************************************************/
osGLOBAL bit32
tiINILogin(
           tiRoot_t            *tiRoot,
           tiDeviceHandle_t    *tiDeviceHandle
           )
{
  /* Explicit login is not implemented by this transport layer; targets are
     usable as soon as discovery registers them. */
  TI_DBG6(("tiINILogin: start\n"));
  return tiNotSupported;
}

/*****************************************************************************
*! \brief  tiINILogout
*
*  Purpose: This function is called to request that the Transport Dependent
*           Layer initiates logout for a specific target from the previously
*           successful login through tiINILogin() call.
*
*  \param tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                          instance.
*  \param tiDeviceHandle:  Pointer to a target device handle.
*
*  \return:
*         tiSuccess       Logout initiated.
*         tiError         Logout failed.
*         tiBusy          Logout can not be initiated at this time.
*         tiNotSupported  This API is currently not supported by this
*                         Transport Layer
*
*
*****************************************************************************/
osGLOBAL bit32
tiINILogout(
            tiRoot_t            *tiRoot,
            tiDeviceHandle_t    *tiDeviceHandle
            )
{
  /* See tiINILogin(): login/logout are no-ops in this layer. */
  TI_DBG6(("tiINILogout: start\n"));
  return tiNotSupported;
}

/*****************************************************************************
*!
\brief  tiINIGetExpander
*
*  Purpose: Looks up, on the port matching tiPortalContext, the expander
*           device that the given target device hangs off of, and returns
*           its device handle through tiExp.
*
*  \return: tiSuccess when the attached expander handle was found;
*           tiError otherwise (no port context, invalid port, no expander).
*
*  \note:
*
*****************************************************************************/
osGLOBAL bit32
tiINIGetExpander(
                 tiRoot_t          * tiRoot,
                 tiPortalContext_t * tiPortalContext,
                 tiDeviceHandle_t  * tiDev,
                 tiDeviceHandle_t  ** tiExp
                 )
{
  tdsaRoot_t        *tdsaRoot      = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdList_t          *PortContextList;
  tdsaPortContext_t *onePortContext = agNULL;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  tdsaDeviceData_t  *oneTargetDeviceData = agNULL;
  tdsaDeviceData_t  *oneExpanderDeviceData = agNULL;
  bit32             found = agFALSE;

  oneTargetDeviceData = (tdsaDeviceData_t *)tiDev->tdData;
  if (oneTargetDeviceData == agNULL)
  {
    TI_DBG1(("tiINIGetExpander: oneTargetDeviceData is NULL\n"));
    return tiError;
  }

  /* Lock protects only the emptiness check; list walks below are unlocked,
     matching the other lookup routines in this file. */
  tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK);
  if (TDLIST_EMPTY(&(tdsaAllShared->MainPortContextList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
    TI_DBG1(("tiINIGetExpander: No available tdsaPortContext\n"));
    TI_DBG1(("tiINIGetExpander: second, returning 0\n"));
    return tiError;
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
  }

  /* find a corresponding portcontext */
  PortContextList = tdsaAllShared->MainPortContextList.flink;
  while (PortContextList != &(tdsaAllShared->MainPortContextList))
  {
    onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, PortContextList);
    TI_DBG3(("tiINIGetExpander: oneportContext pid %d\n", onePortContext->id));
    if (onePortContext->tiPortalContext == tiPortalContext && onePortContext->valid == agTRUE)
    {
      TI_DBG3(("tiINIGetExpander: found; oneportContext pid %d\n", onePortContext->id));
      found = agTRUE;
      break;
    }
    PortContextList = PortContextList->flink;
  }

  if (found == agFALSE)
  {
    TI_DBG1(("tiINIGetExpander: First, No corresponding tdsaPortContext\n"));
    TI_DBG1(("tiINIGetExpander: third, returning 0\n"));
    return tiError;
  }

  if (onePortContext == agNULL)
  {
    TI_DBG1(("tiINIGetExpander: Second, No corressponding tdsaPortContext\n"));
    TI_DBG1(("tiINIGetExpander: fourth, returning 0\n"));
    return tiError;
  }

  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tiINIGetExpander: Third, tdsaPortContext is invalid, pid %d\n", onePortContext->id));
    TI_DBG1(("tiINIGetExpander: fifth, returning 0\n"));
    return tiError;
  }

  /* Walk the global device list looking for the target; on a hit, report
     its attached expander (ExpDevice). */
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  while ( DeviceListList != &(tdsaAllShared->MainDeviceList) )
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    if (oneDeviceData->tdPortContext != onePortContext)
    {
      TI_DBG3(("tiINIGetExpander: different port\n"));
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      if (oneDeviceData == oneTargetDeviceData)
      {
        oneExpanderDeviceData = oneDeviceData->ExpDevice;
        if (oneExpanderDeviceData == agNULL)
        {
          /* Target exists but is not behind an expander (direct attach). */
          TI_DBG1(("tiINIGetExpander: oneExpanderDeviceData is NULL\n"));
          return tiError;
        }
        *tiExp = &(oneExpanderDeviceData->tiDeviceHandle);
        return tiSuccess;
      }
      DeviceListList = DeviceListList->flink;
    }
  }
  return tiError;
}

/*
 * Retrieves the adapter WWN via an NVMD IOCTL and publishes it as the SAS
 * address of every phy, then returns pointers to the requested phy's
 * address bytes.
 * NOTE(review): phyId is not range-checked here — presumably callers
 * guarantee phyId < TD_MAX_NUM_PHYS; confirm at call sites.
 */
osGLOBAL void
tiIniGetDirectSataSasAddr(tiRoot_t * tiRoot, bit32 phyId, bit8 **sasAddressHi, bit8 **sasAddressLo)
{
  tdsaRoot_t           *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t        *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t           *agRoot = &tdsaAllShared->agRootInt;
  tiIOCTLPayload_wwn_t agIoctlPayload;
  bit8                 nvmDev;
  bit32                status;
  int                  i;

  agIoctlPayload.Length = 4096;
  agIoctlPayload.Reserved = 0;
  agIoctlPayload.MinorFunction = IOCTL_MN_NVMD_GET_CONFIG;
  agIoctlPayload.MajorFunction = IOCTL_MJ_NVMD_GET;

  /* Pump the interrupt handler once so the IOCTL completion can be seen. */
  tiCOMDelayedInterruptHandler(tiRoot, 0,1, tiNonInterruptContext);

  /* NVMD device number differs between SPC (4) and later controllers (1). */
  if(tiIS_SPC(agRoot))
  {
    nvmDev = 4;
    status = tdsaNVMDGetIoctl(tiRoot, (tiIOCTLPayload_t *)&agIoctlPayload, agNULL, agNULL, &nvmDev);
  }
  else
  {
    nvmDev = 1;
    status = tdsaNVMDGetIoctl(tiRoot, (tiIOCTLPayload_t *)&agIoctlPayload, agNULL, agNULL, &nvmDev);
  }
  if(status == IOCTL_CALL_FAIL)
  {
#if !(defined(__FreeBSD__))
    printk("Error getting Adapter WWN\n");
#else
    printf("Error getting Adapter WWN\n");
#endif
    return;
  }
  /* Copy the WWN from the IOCTL payload into every phy's SAS address.
     NOTE(review): the bit32-through-bit8 casts assume 4-byte alignment of
     both the payload area and SASID fields — verify on strict-alignment
     platforms. */
  for(i=0; i< TD_MAX_NUM_PHYS; i++)
  {
    *(bit32 *)(tdsaAllShared->Ports[i].SASID.sasAddressHi) = *(bit32 *)&agIoctlPayload.FunctionSpecificArea[0];
    *(bit32 *)(tdsaAllShared->Ports[i].SASID.sasAddressLo) = *(bit32 *)&agIoctlPayload.FunctionSpecificArea[4];
    TI_DBG3(("SAS AddressHi is 0x%x\n", *(bit32 *)(tdsaAllShared->Ports[i].SASID.sasAddressHi)));
    TI_DBG3(("SAS AddressLo is 0x%x\n", *(bit32 *)(tdsaAllShared->Ports[i].SASID.sasAddressLo)));
  }
  *sasAddressHi = tdsaAllShared->Ports[phyId].SASID.sasAddressHi;
  *sasAddressLo = tdsaAllShared->Ports[phyId].SASID.sasAddressLo;
}

/*
 * Returns the device handle of the expander (edge/fanout/SMP target) on the
 * port matching tiPortalContext whose SAS address equals
 * sas_addr_hi/sas_addr_lo, or agNULL when no such expander is found.
 * maxDevs bounds how many same-port list entries are examined.
 */
osGLOBAL tiDeviceHandle_t *
tiINIGetExpDeviceHandleBySasAddress(
                      tiRoot_t          * tiRoot,
                      tiPortalContext_t * tiPortalContext,
                      bit32             sas_addr_hi,
                      bit32             sas_addr_lo,
                      bit32             maxDevs
                      )
{
  tdsaRoot_t        *tdsaRoot      = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdList_t          *PortContextList;
  tdsaPortContext_t *onePortContext = agNULL;
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  //bit32           i;
  //bit32           FoundDevices = 0;
  bit32             DeviceIndex = 0;
  bit32             found = agFALSE;

  TI_DBG2(("tiINIGetExpDeviceHandleBySasAddress: start\n"));
  TI_DBG2(("tiINIGetExpDeviceHandleBySasAddress: tiPortalContext %p\n", tiPortalContext));

  if (maxDevs == 0)
  {
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: maxDevs is 0\n"));
    return agNULL;
  }

  tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK);
  if (TDLIST_EMPTY(&(tdsaAllShared->MainPortContextList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: No available tdsaPortContext\n"));
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: second, returning 0\n"));
    return agNULL;
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
  }

  /* find a corresponding portcontext */
  PortContextList = tdsaAllShared->MainPortContextList.flink;
  if(PortContextList == agNULL)
  {
    TI_DBG6(("tiINIGetExpDeviceHandleBySasAddress: PortContextList is NULL!!\n"));
    return agNULL;
  }
  while (PortContextList != &(tdsaAllShared->MainPortContextList))
  {
    onePortContext = TDLIST_OBJECT_BASE(tdsaPortContext_t, MainLink, PortContextList);
    if(onePortContext == agNULL)
    {
      TI_DBG6(("tiINIGetExpDeviceHandleBySasAddress: onePortContext is NULL!!\n"));
      return agNULL;
    }
    TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: oneportContext pid %d\n", onePortContext->id));
    if (onePortContext->tiPortalContext == tiPortalContext && onePortContext->valid == agTRUE)
    {
      TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: found; oneportContext pid %d\n", onePortContext->id));
      found = agTRUE;
      break;
    }
    if(PortContextList != agNULL)
    {
      PortContextList = PortContextList->flink;
    }
  }

  if (found == agFALSE)
  {
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: First, No corresponding tdsaPortContext\n"));
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: third, returning 0\n"));
    /* nullify all device handles */
    return agNULL;
  }

  if (onePortContext == agNULL)
  {
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: Second, No corressponding tdsaPortContext\n"));
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: fourth, returning 0\n"));
    /* nullify all device handles */
    return agNULL;
  }

  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: Third, tdsaPortContext is invalid, pid %d\n", onePortContext->id));
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: fifth, returning 0\n"));
    return agNULL;
  }

  TI_DBG2(("tiINIGetExpDeviceHandleBySasAddress: pid %d\n", onePortContext->id));

  /* to do: check maxdev and length of Mainlink */
  /* From the device list, returns only valid devices */
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  if(DeviceListList == agNULL)
  {
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: DeviceListList == agNULL\n"));
    TI_DBG1(("tiINIGetExpDeviceHandleBySasAddress: seventh, returning not found, pid %d\n", onePortContext->id));
    return agNULL;
  }

  /* DeviceIndex counts same-port entries examined, not matches; entries on
     other ports do not consume the maxDevs budget. */
  while ((DeviceIndex < maxDevs) && DeviceListList != &(tdsaAllShared->MainDeviceList))
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    if(oneDeviceData == agNULL)
    {
      TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: oneDeviceData is NULL!!\n"));
      return agNULL;
    }
    TI_DBG6(("tiINIGetExpDeviceHandleBySasAddress: handle %p\n", &(oneDeviceData->tiDeviceHandle)));
    if (oneDeviceData->tdPortContext != onePortContext)
    {
      TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: different port\n"));
      if(DeviceListList != agNULL)
      {
        DeviceListList = DeviceListList->flink;
      }
    }
    else
    {
      if ((oneDeviceData->valid == agTRUE) &&
          (oneDeviceData->registered == agTRUE) &&
          (oneDeviceData->tdPortContext == onePortContext) &&
          (
           (oneDeviceData->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE) ||
           (oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE) ||
           DEVICE_IS_SMP_TARGET(oneDeviceData)
          )
         )
      {
        if(oneDeviceData->SASAddressID.sasAddressLo == sas_addr_lo && oneDeviceData->SASAddressID.sasAddressHi == sas_addr_hi)
        {
          //TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: valid FoundDevices %d\n", FoundDevices));
          TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: agDevHandle %p\n", oneDeviceData->agDevHandle));
          TI_DBG3(("tiINIGetExpDeviceHandleBySasAddress: Matched sas address: low %x and high %x\n", oneDeviceData->SASAddressID.sasAddressLo, oneDeviceData->SASAddressID.sasAddressHi));
          return &(oneDeviceData->tiDeviceHandle);
        }
      }
      DeviceIndex++;
      DeviceListList = DeviceListList->flink;
    } /* else */
  }
  return agNULL;
}

#ifdef TD_DISCOVER
/*****************************************************************************
*! \brief  tdsaDiscover
*
*  Purpose: This function is called to trigger topology discovery within a
*           portcontext.
*
*  \param  tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                           instance.
*  \param  onePortContext:  Pointer to the portal context instance.
*  \param  type:            Type of discovery. It can be SAS or SATA.
*  \param  option:          discovery option. It can be Full or Incremental
*                           discovery.
*
*  \return:
*          tiSuccess    Discovery initiated.
*          tiError      Discovery could not be initiated at this time.
* * \note: * *****************************************************************************/ osGLOBAL bit32 tdsaDiscover( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, bit32 type, bit32 option ) { bit32 ret = tiError; TI_DBG3(("tdsaDiscover: start\n")); if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaDiscover: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return ret; } switch ( option ) { case TDSA_DISCOVERY_OPTION_FULL_START: TI_DBG3(("tdsaDiscover: full\n")); onePortContext->discovery.type = TDSA_DISCOVERY_OPTION_FULL_START; if ( type == TDSA_DISCOVERY_TYPE_SAS ) { ret = tdsaSASFullDiscover(tiRoot, onePortContext); } #ifdef SATA_ENABLE else if ( type == TDSA_DISCOVERY_TYPE_SATA ) { if (onePortContext->discovery.status == DISCOVERY_SAS_DONE) { ret = tdsaSATAFullDiscover(tiRoot, onePortContext); } } #endif break; case TDSA_DISCOVERY_OPTION_INCREMENTAL_START: TI_DBG3(("tdsaDiscover: incremental\n")); onePortContext->discovery.type = TDSA_DISCOVERY_OPTION_INCREMENTAL_START; if ( type == TDSA_DISCOVERY_TYPE_SAS ) { TI_DBG3(("tdsaDiscover: incremental SAS\n")); ret = tdsaSASIncrementalDiscover(tiRoot, onePortContext); } #ifdef SATA_ENABLE else if ( type == TDSA_DISCOVERY_TYPE_SATA ) { if (onePortContext->discovery.status == DISCOVERY_SAS_DONE) { TI_DBG3(("tdsaDiscover: incremental SATA\n")); ret = tdsaSATAIncrementalDiscover(tiRoot, onePortContext); } } #endif break; case TDSA_DISCOVERY_OPTION_ABORT: TI_DBG1(("tdsaDiscover: abort\n")); break; default: break; } if (ret != tiSuccess) { TI_DBG1(("tdsaDiscover: fail, error 0x%x\n", ret)); } return ret; } /***************************************************************************** *! \brief tdsaSASFullDiscover * * Purpose: This function is called to trigger full SAS topology discovery * within a portcontext. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. 
* * \return: * tiSuccess Discovery initiated. * tiError Discovery could not be initiated at this time. * * \note: * *****************************************************************************/ osGLOBAL bit32 tdsaSASFullDiscover( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdsaDeviceData_t *oneDeviceData = agNULL; tdList_t *DeviceListList; int i, j; bit8 portMaxRate; TI_DBG3(("tdsaSASFullDiscover: start\n")); if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaSASFullDiscover: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return tiError; } /* 1. abort all IO; may need a new LL API since TD does not queue IO's 2. initializes(or invalidate) devices belonging to the port 3. onePortContext->DiscoveryState == ITD_DSTATE_STARTED 4. add directly connected one; if directed-SAS, spin-up 5. tdsaSASUpStreamDiscoverStart(agRoot, pPort, pDevice) */ /* invalidate all devices belonging to the portcontext except direct attached SAS/SATA */ DeviceListList = tdsaAllShared->MainDeviceList.flink; while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); TI_DBG3(("tdsaSASFullDiscover: STARTED loop id %d\n", oneDeviceData->id)); TI_DBG3(("tdsaSASFullDiscover: STARTED loop sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASFullDiscover: STARTED loop sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); if (oneDeviceData->tdPortContext == onePortContext && (onePortContext->nativeSATAMode == agFALSE && onePortContext->directAttatchedSAS == agFALSE) ) { TI_DBG3(("tdsaSASFullDiscover: invalidate\n")); oneDeviceData->valid = agFALSE; oneDeviceData->processed = agFALSE; } else { TI_DBG3(("tdsaSASFullDiscover: not invalidate\n")); /* no changes */ } DeviceListList = DeviceListList->flink; } 
onePortContext->DiscoveryState = ITD_DSTATE_STARTED; /* nativeSATAMode is set in ossaHwCB() in link up */ if (onePortContext->nativeSATAMode == agFALSE) /* default: SAS and SAS/SATA mode */ { if (SA_IDFRM_GET_DEVICETTYPE(&onePortContext->sasIDframe) == SAS_END_DEVICE && SA_IDFRM_IS_SSP_TARGET(&onePortContext->sasIDframe) ) { for(i=0;iPhyIDList[i] == agTRUE) { for (j=0;jagRoot, agNULL, tdsaRotateQnumber(tiRoot, agNULL), i, AGSA_PHY_NOTIFY_ENABLE_SPINUP, agNULL); } break; } } } /* add the device 1. add device in TD layer 2. call saRegisterNewDevice 3. update agDevHandle in ossaDeviceRegistrationCB() */ portMaxRate = onePortContext->LinkRate; oneDeviceData = tdsaPortSASDeviceAdd( tiRoot, onePortContext, onePortContext->sasIDframe, agFALSE, portMaxRate, IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, agNULL, 0xFF ); if (oneDeviceData) { if (oneDeviceData->registered == agFALSE) { /* set the timer and wait till the device(directly attached. eg Expander) to be registered. Then, in tdsaDeviceRegistrationTimerCB(), tdsaSASUpStreamDiscoverStart() is called */ tdsaDeviceRegistrationTimer(tiRoot, onePortContext, oneDeviceData); } else { tdsaSASUpStreamDiscoverStart(tiRoot, onePortContext, oneDeviceData); } } #ifdef REMOVED // temp testing code tdsaReportManInfoSend(tiRoot, oneDeviceData); //end temp testing code #endif } else /* SATAOnlyMode*/ { tdsaSASDiscoverDone(tiRoot, onePortContext, tiSuccess); } return tiSuccess; } /***************************************************************************** *! \brief tdsaSASUpStreamDiscoverStart * * Purpose: This function is called to trigger upstream traverse in topology * within a portcontext. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneDeviceData: Pointer to the device data. 
*
*  \return:
*          None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaSASUpStreamDiscoverStart(
                             tiRoot_t             *tiRoot,
                             tdsaPortContext_t    *onePortContext,
                             tdsaDeviceData_t     *oneDeviceData
                             )
{
  tdsaExpander_t        *oneExpander;

  TI_DBG3(("tdsaSASUpStreamDiscoverStart: start\n"));
  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tdsaSASUpStreamDiscoverStart: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return;
  }
  /*
    1. update discovery state to UP_STREAM
    2. if (expander) add it
    3. tdsaSASUpStreamDiscovering
  */
  onePortContext->discovery.status = DISCOVERY_UP_STREAM;
  if (
      (oneDeviceData->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE)
      ||
      (oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE)
      )
  {
    /* Seed the discovering-expander list with the starting expander. */
    oneExpander = tdssSASDiscoveringExpanderAlloc(tiRoot, onePortContext, oneDeviceData);
    if ( oneExpander != agNULL)
    {
      /* (2.2.1) Add to discovering list */
      tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, oneExpander);
    }
    else
    {
      TI_DBG1(("tdsaSASUpStreamDiscoverStart: failed to allocate expander or discovey aborted\n"));
      return;
    }
  }
  tdsaSASUpStreamDiscovering(tiRoot, onePortContext, oneDeviceData);
  return;
}

/*****************************************************************************
*! \brief  tdsaSASUpStreamDiscovering
*
*  Purpose: For each expander in the expander list, this function sends SMP to
*           find information for discovery and calls
*           tdsaSASDownStreamDiscoverStart() function.
*
*  \param  tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                           instance.
*  \param  onePortContext:  Pointer to the portal context instance.
*  \param  oneDeviceData:   Pointer to the device data.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdsaSASUpStreamDiscovering(
  tiRoot_t             *tiRoot,
  tdsaPortContext_t    *onePortContext,
  tdsaDeviceData_t     *oneDeviceData
  )
{
  tdList_t          *ExpanderList;
  tdsaExpander_t    *oneNextExpander = agNULL;

  TI_DBG3(("tdsaSASUpStreamDiscovering: start\n"));
  /* bail out if the port context went away while this step was queued */
  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tdsaSASUpStreamDiscovering: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return;
  }
  /*
    1. find the next expander
    2. if (there is next expander) send report general with saSMPStart
       else tdsaSASDownStreamDiscoverStart
  */
  tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
  if (TDLIST_EMPTY(&(onePortContext->discovery.discoveringExpanderList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdsaSASUpStreamDiscovering: should be the end\n"));
    oneNextExpander = agNULL;
  }
  else
  {
    /* Peek, not pop: dequeue the head and immediately re-queue it at the
       head so the expander stays on the list while its SMP is in flight. */
    TDLIST_DEQUEUE_FROM_HEAD(&ExpanderList, &(onePortContext->discovery.discoveringExpanderList));
    oneNextExpander = TDLIST_OBJECT_BASE(tdsaExpander_t, linkNode, ExpanderList);
    TDLIST_ENQUEUE_AT_HEAD(&(oneNextExpander->linkNode), &(onePortContext->discovery.discoveringExpanderList));
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdssSASDiscoveringExpander tdsaSASUpStreamDiscovering: dequeue head\n"));
    TI_DBG3(("tdsaSASUpStreamDiscovering: expander id %d\n", oneNextExpander->id));
  }
  if (oneNextExpander != agNULL)
  {
    /* continue upstream: interrogate the next expander via SMP REPORT GENERAL */
    tdsaReportGeneralSend(tiRoot, oneNextExpander->tdDevice);
  }
  else
  {
    /* upstream traversal exhausted; switch direction */
    TI_DBG3(("tdsaSASUpStreamDiscovering: No more expander list\n"));
    tdsaSASDownStreamDiscoverStart(tiRoot, onePortContext, oneDeviceData);
  }
  return;
}
/*****************************************************************************
*! \brief  tdsaSASDownStreamDiscoverStart
*
*  Purpose:  This function is called to trigger downstream traverse in topology
*            within a portcontext.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdsaSASDownStreamDiscoverStart(
  tiRoot_t             *tiRoot,
  tdsaPortContext_t    *onePortContext,
  tdsaDeviceData_t     *oneDeviceData
  )
{
  tdsaExpander_t        *oneExpander;
  tdsaExpander_t        *UpStreamExpander;

  TI_DBG3(("tdsaSASDownStreamDiscoverStart: start\n"));
  /* port context invalidated while queued -> abort the discovery */
  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tdsaSASDownStreamDiscoverStart: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return;
  }
  /*
    1. update discover state
    2. if (expander is root) add it
       else just add it
    3. tdsaSASDownStreamDiscovering
  */
  /* set discovery status */
  onePortContext->discovery.status = DISCOVERY_DOWN_STREAM;

  TI_DBG3(("tdsaSASDownStreamDiscoverStart: pPort=%p pDevice=%p\n", onePortContext, oneDeviceData));

  /* If it's an expander */
  if ( (oneDeviceData->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE)
       || (oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE))
  {
    oneExpander = oneDeviceData->tdExpander;
    UpStreamExpander = oneExpander->tdUpStreamExpander;
    /* If the two expanders are the root of two edge sets; sub-to-sub:
       each expander names the other as its upstream, so both are roots
       and both must be (re)walked downstream. */
    if ( (UpStreamExpander != agNULL) && ( UpStreamExpander->tdUpStreamExpander == oneExpander ) )
    {
      TI_DBG3(("tdsaSASDownStreamDiscoverStart: Root found pExpander=%p pUpStreamExpander=%p\n", oneExpander, UpStreamExpander));
      //Saves the root expander
      onePortContext->discovery.RootExp = oneExpander;
      TI_DBG3(("tdsaSASDownStreamDiscoverStart: Root exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi));
      TI_DBG3(("tdsaSASDownStreamDiscoverStart: Root exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo));
      /* reset up stream inform for pExpander */
      oneExpander->tdUpStreamExpander = agNULL;
      /* Add the pExpander to discovering list */
      tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, oneExpander);
      /* reset up stream inform for oneExpander */
      UpStreamExpander->tdUpStreamExpander = agNULL;
      /* Add the UpStreamExpander to discovering list */
      tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, UpStreamExpander);
    }
    /* If the two expanders are not the root of two edge sets. eg) one root */
    else
    {
      //Saves the root expander
      onePortContext->discovery.RootExp = oneExpander;
      TI_DBG3(("tdsaSASDownStreamDiscoverStart: NO Root pExpander=%p\n", oneExpander));
      TI_DBG3(("tdsaSASDownStreamDiscoverStart: Root exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi));
      TI_DBG3(("tdsaSASDownStreamDiscoverStart: Root exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo));
      /* (2.2.2.1) Add the pExpander to discovering list */
      tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, oneExpander);
    }
  }
  /* Continue down stream discovering */
  tdsaSASDownStreamDiscovering(tiRoot, onePortContext, oneDeviceData);
  return;
}
/*****************************************************************************
*! \brief  tdsaSASDownStreamDiscovering
*
*  Purpose:  For each expander in the expander list, this function sends SMP to
*            find information for discovery and calls
*            tdsaSASDownStreamDiscoverStart() function.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdsaSASDownStreamDiscovering(
  tiRoot_t             *tiRoot,
  tdsaPortContext_t    *onePortContext,
  tdsaDeviceData_t     *oneDeviceData
  )
{
  tdsaExpander_t    *NextExpander = agNULL;
  tdList_t          *ExpanderList;

  TI_DBG3(("tdsaSASDownStreamDiscovering: start\n"));

  TI_DBG3(("tdsaSASDownStreamDiscovering: pPort=%p pDevice=%p\n", onePortContext, oneDeviceData));
  /* port context invalidated while queued -> abort the discovery */
  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tdsaSASDownStreamDiscovering: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return;
  }
  tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
  if (TDLIST_EMPTY(&(onePortContext->discovery.discoveringExpanderList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdsaSASDownStreamDiscovering: should be the end\n"));
    NextExpander = agNULL;
  }
  else
  {
    /* Peek at the head under the lock: dequeue and immediately re-queue at
       the head so the expander remains on the list while being processed.
       (Fixed: stray double semicolons after both list macros.) */
    TDLIST_DEQUEUE_FROM_HEAD(&ExpanderList, &(onePortContext->discovery.discoveringExpanderList));
    NextExpander = TDLIST_OBJECT_BASE(tdsaExpander_t, linkNode, ExpanderList);
    TDLIST_ENQUEUE_AT_HEAD(&(NextExpander->linkNode), &(onePortContext->discovery.discoveringExpanderList));
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdssSASDiscoveringExpander tdsaSASDownStreamDiscovering: dequeue head\n"));
    TI_DBG3(("tdsaSASDownStreamDiscovering: expander id %d\n", NextExpander->id));
  }

  /* If there is an expander for continue discoving */
  if ( NextExpander != agNULL)
  {
    TI_DBG3(("tdsaSASDownStreamDiscovering: Found pNextExpander=%p\n, discoveryStatus=0x%x", NextExpander, onePortContext->discovery.status));
    switch (onePortContext->discovery.status)
    {
    /* If the discovery status is DISCOVERY_DOWN_STREAM */
    case DISCOVERY_DOWN_STREAM:
      /* Send report general for the next expander */
      TI_DBG3(("tdsaSASDownStreamDiscovering: DownStream pNextExpander->pDevice=%p\n", NextExpander->tdDevice));
      tdsaReportGeneralSend(tiRoot, NextExpander->tdDevice);
      break;
    /* If the discovery status is DISCOVERY_CONFIG_ROUTING */
    case DISCOVERY_CONFIG_ROUTING:
    case DISCOVERY_REPORT_PHY_SATA:
      /* set discovery status */
      onePortContext->discovery.status = DISCOVERY_DOWN_STREAM;
      /* (fixed debug-message typo: "nake" -> "make") */
      TI_DBG3(("tdsaSASDownStreamDiscovering: pPort->discovery.status=DISCOVERY_CONFIG_ROUTING, make it DOWN_STREAM\n"));
      /* If not the last phy */
      if ( NextExpander->discoveringPhyId < NextExpander->tdDevice->numOfPhys )
      {
        TI_DBG3(("tdsaSASDownStreamDiscovering: pNextExpander->discoveringPhyId=0x%x pNextExpander->pDevice->numOfPhys=0x%x. Send More Discover\n", NextExpander->discoveringPhyId, NextExpander->tdDevice->numOfPhys));
        /* Send discover for the next expander */
        tdsaDiscoverSend(tiRoot, NextExpander->tdDevice);
      }
      /* If it's the last phy */
      else
      {
        TI_DBG3(("tdsaSASDownStreamDiscovering: Last Phy, remove expander%p  start DownStream=%p\n", NextExpander, NextExpander->tdDevice));
        tdssSASDiscoveringExpanderRemove(tiRoot, onePortContext, NextExpander);
        /* recurse to pick the next expander off the discovering list */
        tdsaSASDownStreamDiscovering(tiRoot, onePortContext, NextExpander->tdDevice);
      }
      break;
    default:
      TI_DBG3(("tdsaSASDownStreamDiscovering: *** Unknown pPort->discovery.status=0x%x\n", onePortContext->discovery.status));
    }
  }
  /* If no expander for continue discoving */
  else
  {
    TI_DBG3(("tdsaSASDownStreamDiscovering: No more expander DONE\n"));
    /* discover done */
    tdsaSASDiscoverDone(tiRoot, onePortContext, tiSuccess);
  }
  return;
}
/*****************************************************************************
*! \brief  tdsaCleanAllExp
*
*  Purpose:  This function cleans up expander data structures after discovery
*            is complete.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdsaCleanAllExp(
  tiRoot_t                 *tiRoot,
  tdsaPortContext_t        *onePortContext
  )
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdList_t          *ExpanderList;
  tdsaExpander_t    *tempExpander;
  tdsaPortContext_t *tmpOnePortContext = onePortContext;

  TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: start\n"));

  TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: before all clean up\n"));
  tdsaDumpAllFreeExp(tiRoot);

  /* clean up UpdiscoveringExpanderList*/
  TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: clean discoveringExpanderList\n"));
  tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
  if (!TDLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
    /* Drain the discovering list: each node is moved to the shared free
       pool; the lock is re-taken for each unlink so the debug prints above
       run unlocked, matching the original locking pattern. */
    while (ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList))
    {
      tempExpander = TDLIST_OBJECT_BASE(tdsaExpander_t, linkNode, ExpanderList);
      TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: exp addrHi 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressHi));
      TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: exp addrLo 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressLo));
      /* putting back to the free pool */
      tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
      TDLIST_DEQUEUE_THIS(&(tempExpander->linkNode));
      TDLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(tdsaAllShared->freeExpanderList));

      if (TDLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
      {
        tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
        break;
      }
      else
      {
        tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
      }
      /* restart from the (new) head since this node left the list */
      ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
      // ExpanderList = ExpanderList->flink;
    }
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: empty discoveringExpanderList\n"));
  }

  /* reset UpdiscoveringExpanderList */
  TDLIST_INIT_HDR(&(tmpOnePortContext->discovery.UpdiscoveringExpanderList));

  TI_DBG3(("tdssSASDiscoveringExpander tdsaCleanAllExp: after all clean up\n"));
  tdsaDumpAllFreeExp(tiRoot);

  return;
}
/*****************************************************************************
*! \brief  tdsaFreeAllExp
*
*  Purpose:  This function frees up expander data structures as a part of
*            soft reset.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdsaFreeAllExp(
  tiRoot_t                 *tiRoot,
  tdsaPortContext_t        *onePortContext
  )
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdList_t          *ExpanderList;
  tdsaExpander_t    *tempExpander;
  tdsaPortContext_t *tmpOnePortContext = onePortContext;

  TI_DBG3(("tdssSASDiscoveringExpander tdsaFreeAllExp: start\n"));

  TI_DBG3(("tdssSASDiscoveringExpander tdsaFreeAllExp: before all clean up\n"));
  tdsaDumpAllFreeExp(tiRoot);

  /* clean up UpdiscoveringExpanderList*/
  /* NOTE(review): body is nearly identical to tdsaCleanAllExp above, except
     the final dump of the free pool is omitted here. */
  TI_DBG3(("tdssSASDiscoveringExpander tdsaFreeAllExp: clean discoveringExpanderList\n"));
  tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
  if (!TDLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
    while (ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList))
    {
      tempExpander = TDLIST_OBJECT_BASE(tdsaExpander_t, linkNode, ExpanderList);
      TI_DBG3(("tdssSASDiscoveringExpander tdsaFreeAllExp: exp addrHi 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressHi));
      TI_DBG3(("tdssSASDiscoveringExpander tdsaFreeAllExp: exp addrLo 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressLo));
      /* putting back to the free pool */
      tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
      TDLIST_DEQUEUE_THIS(&(tempExpander->linkNode));
      TDLIST_ENQUEUE_AT_TAIL(&(tempExpander->linkNode), &(tdsaAllShared->freeExpanderList));

      if (TDLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
      {
        tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
        break;
      }
      else
      {
        tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
      }
      ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
      // ExpanderList = ExpanderList->flink;
    }
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdssSASDiscoveringExpander tdsaFreeAllExp: empty discoveringExpanderList\n"));
  }

  /* reset UpdiscoveringExpanderList */
  TDLIST_INIT_HDR(&(tmpOnePortContext->discovery.UpdiscoveringExpanderList));

  return;
}
/*****************************************************************************
*! \brief  tdsaResetValidDeviceData
*
*  Purpose:  This function resets valid and valid2 field for discovered devices
*            in the device list. This is used only in incremental discovery.
*
*  \param   agRoot :         Pointer to chip/driver Instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdsaResetValidDeviceData(
  agsaRoot_t           *agRoot,
  tdsaPortContext_t    *onePortContext
  )
{
  tdsaRootOsData_t  *osData        = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot        = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t        *tdsaRoot      = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdList_t          *listEntry;
  tdsaDeviceData_t  *deviceData;
  bit32             listIsEmpty;

  TI_DBG3(("tdsaResetValidDeviceData: start\n"));

  /* Check emptiness under the device lock; the walk below is done without
     the lock, exactly as in the original implementation. */
  tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
  listIsEmpty = TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList));
  tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);

  if (listIsEmpty)
  {
    TI_DBG1(("tdsaResetValidDeviceData: empty device list\n"));
  }
  else
  {
    /* Promote each device's incremental-discovery flag (valid2) into valid
       and clear valid2 for the next discovery round. */
    for (listEntry = tdsaAllShared->MainDeviceList.flink;
         listEntry != &(tdsaAllShared->MainDeviceList);
         listEntry = listEntry->flink)
    {
      deviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, listEntry);
      deviceData->valid  = deviceData->valid2;
      deviceData->valid2 = agFALSE;
      TI_DBG3(("tdsaResetValidDeviceData: valid %d valid2 %d\n", deviceData->valid, deviceData->valid2));
    }
  }
  return;
}
/*****************************************************************************
*! \brief  tdssReportChanges
*
*  Purpose:  This function goes through the device list and finds out whether
*            a device is removed and newly added. Based on the findings,
*            this function notifies OS layer of the change.
*
*  \param   agRoot :         Pointer to chip/driver Instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdssReportChanges(
  agsaRoot_t           *agRoot,
  tdsaPortContext_t    *onePortContext
  )
{
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  bit32             added = agFALSE, removed = agFALSE;

  TI_DBG1(("tdssReportChanges: start\n"));

  tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
  if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
    TI_DBG1(("tdssReportChanges: empty device list\n"));
    return;
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
  }
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  /* Walk every device; the (valid, valid2) pair encodes the diff between the
     previous and the just-finished discovery:
       (T,T) unchanged, (T,F) removed, (F,T) newly added. */
  while (DeviceListList != &(tdsaAllShared->MainDeviceList))
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    TI_DBG3(("tdssReportChanges: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    TI_DBG3(("tdssReportChanges: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    if ( oneDeviceData->tdPortContext == onePortContext)
    {
      TI_DBG3(("tdssReportChanges: right portcontext\n"));
      if ( (oneDeviceData->valid == agTRUE) && (oneDeviceData->valid2 == agTRUE) )
      {
        TI_DBG3(("tdssReportChanges: same\n"));
        /* reset valid bit */
        oneDeviceData->valid = oneDeviceData->valid2;
        oneDeviceData->valid2 = agFALSE;
      }
      else if ( (oneDeviceData->valid == agTRUE) && (oneDeviceData->valid2 == agFALSE) )
      {
        TI_DBG3(("tdssReportChanges: removed\n"));
        removed = agTRUE;
        /* reset valid bit */
        oneDeviceData->valid = oneDeviceData->valid2;
        oneDeviceData->valid2 = agFALSE;
        /* reset NumOfFCA */
        oneDeviceData->satDevData.NumOfFCA = 0;
        /* targets get their outstanding I/O aborted; bare expanders are
           simply deregistered from the lower layer */
        if ( (oneDeviceData->registered == agTRUE) &&
             ( DEVICE_IS_SSP_TARGET(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)
              || DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_SMP_TARGET(oneDeviceData) )
            )
        {
          tdsaAbortAll(tiRoot, agRoot, oneDeviceData);
        }
        else if (oneDeviceData->registered == agTRUE)
        {
          TI_DBG1(("tdssReportChanges: calling saDeregisterDeviceHandle, did %d\n", oneDeviceData->id));
          saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, 0);
        }
        oneDeviceData->registered = agFALSE;
#ifdef REMOVED
        /* don't remove device from the device list. May screw up ordering of report */
        TDLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink));
        TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));
#endif
      }
      else if ( (oneDeviceData->valid == agFALSE) && (oneDeviceData->valid2 == agTRUE) )
      {
        TI_DBG3(("tdssReportChanges: added\n"));
        added = agTRUE;
        /* reset valid bit */
        oneDeviceData->valid = oneDeviceData->valid2;
        oneDeviceData->valid2 = agFALSE;
      }
      else
      {
        TI_DBG6(("tdssReportChanges: else\n"));
      }
    }
    else
    {
      TI_DBG1(("tdssReportChanges: different portcontext\n"));
    }
    DeviceListList = DeviceListList->flink;
  }
  /* arrival or removal at once */
  if (added == agTRUE)
  {
    TI_DBG3(("tdssReportChanges: added at the end\n"));
#ifdef AGTIAPI_CTL
    if (tdsaAllShared->SASConnectTimeLimit)
      tdsaCTLSet(tiRoot, onePortContext, tiIntrEventTypeDeviceChange,
                 tiDeviceArrival);
    else
#endif
      ostiInitiatorEvent(
                         tiRoot,
                         onePortContext->tiPortalContext,
                         agNULL,
                         tiIntrEventTypeDeviceChange,
                         tiDeviceArrival,
                         agNULL
                         );
  }
  if (removed == agTRUE)
  {
    TI_DBG3(("tdssReportChanges: removed at the end\n"));
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDeviceChange,
                       tiDeviceRemoval,
                       agNULL
                       );
  }

  /* nothing changed but a "discovery OK" report was previously deferred;
     deliver it now so the OS layer is not left waiting */
  if (onePortContext->discovery.forcedOK == agTRUE && added == agFALSE && removed == agFALSE)
  {
    TI_DBG1(("tdssReportChanges: missed chance to report. forced to report OK\n"));
    onePortContext->discovery.forcedOK = agFALSE;
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDiscovery,
                       tiDiscOK,
                       agNULL
                       );
  }

  if (added == agFALSE && removed == agFALSE)
  {
    TI_DBG3(("tdssReportChanges: the same\n"));
  }
  return;
}
/*****************************************************************************
*! \brief  tdssReportRemovals
*
*  Purpose:  This function goes through device list and removes all devices
*            belong to the portcontext. This function also deregiters those
*            devices. This function is called in case of incremental discovery
*            failure.
*
*  \param   agRoot :         Pointer to chip/driver Instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
* \return:
*  None
*
* \note:
*
*****************************************************************************/
osGLOBAL void
tdssReportRemovals(
  agsaRoot_t           *agRoot,
  tdsaPortContext_t    *onePortContext,
  bit32                flag
  )
{
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  bit32             removed = agFALSE;
  agsaEventSource_t *eventSource;
  bit32             PhyID;
  bit32             HwAckSatus;
  agsaDevHandle_t   *agDevHandle = agNULL;

  TI_DBG2(("tdssReportRemovals: start\n"));
  /* in case nothing was registered */
  PhyID = onePortContext->eventPhyID;
  if (tdsaAllShared->eventSource[PhyID].EventValid == agTRUE &&
      onePortContext->RegisteredDevNums == 0 &&
      PhyID != 0xFF
      )
  {
    /* no device was ever registered on this port: ack the pending HW event
       and (if the port is gone) recycle the port context and its devices */
    TI_DBG2(("tdssReportRemovals: calling saHwEventAck\n"));
    eventSource = &(tdsaAllShared->eventSource[PhyID].Source);
    HwAckSatus = saHwEventAck(
                              agRoot,
                              agNULL, /* agContext */
                              0,
                              eventSource, /* agsaEventSource_t */
                              0,
                              0
                              );
    if ( HwAckSatus != AGSA_RC_SUCCESS)
    {
      TI_DBG1(("tdssReportRemovals: failing in saHwEventAck; status %d\n", HwAckSatus));
    }

    /* toggle */
    tdsaAllShared->eventSource[PhyID].EventValid = agFALSE;
    if (onePortContext->valid == agFALSE)
    {
      /* put device belonging to the port to freedevice list */
      DeviceListList = tdsaAllShared->MainDeviceList.flink;
      while (DeviceListList != &(tdsaAllShared->MainDeviceList))
      {
        oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
        if (oneDeviceData->tdPortContext == onePortContext)
        {
          /* poison the cached IDENTIFY data before recycling the slot */
          osti_memset(&(oneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t));
          tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
          TDLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink));
          TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));
          if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
          {
            tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
            break;
          }
          tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
          /* restart from the head: the current node just left the list */
          DeviceListList = tdsaAllShared->MainDeviceList.flink;
        }
        else
        {
          DeviceListList = DeviceListList->flink;
        }
      } /* while */

      tdsaPortContextReInit(tiRoot, onePortContext);
      /* put all devices belonging to the onePortContext back to the free link */
      tdsaSingleThreadedEnter(tiRoot, TD_PORT_LOCK);
      TDLIST_DEQUEUE_THIS(&(onePortContext->MainLink));
      TDLIST_ENQUEUE_AT_TAIL(&(onePortContext->FreeLink), &(tdsaAllShared->FreePortContextList));
      tdsaSingleThreadedLeave(tiRoot, TD_PORT_LOCK);
    }
  }
  else
  {
    tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
    if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
    {
      tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
      TI_DBG1(("tdssReportRemovals: 1st empty device list\n"));
      return;
    }
    else
    {
      tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
    }
    DeviceListList = tdsaAllShared->MainDeviceList.flink;
    /* needs to clean up devices which were not removed in ossaDeregisterDeviceHandleCB() since port was in valid (discovery error) */
    while (DeviceListList != &(tdsaAllShared->MainDeviceList))
    {
      oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
      if (oneDeviceData == agNULL)
      {
        TI_DBG1(("tdssReportRemovals: oneDeviceData is NULL!!!\n"));
        return;
      }
      TI_DBG2(("tdssReportRemovals: 1st loop did %d\n", oneDeviceData->id));
      TI_DBG2(("tdssReportRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
      TI_DBG2(("tdssReportRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
      TI_DBG2(("tdssReportRemovals: valid %d\n", oneDeviceData->valid));
      TI_DBG2(("tdssReportRemovals: valid2 %d\n", oneDeviceData->valid2));
      TI_DBG2(("tdssReportRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached));
      TI_DBG2(("tdssReportRemovals: registered %d\n", oneDeviceData->registered));
      if ( oneDeviceData->tdPortContext == onePortContext &&
           oneDeviceData->valid == agFALSE &&
           oneDeviceData->valid2 == agFALSE &&
           oneDeviceData->registered == agFALSE
         )
      {
        /* remove oneDevice from MainLink */
        TI_DBG2(("tdssReportRemovals: delete from MainLink\n"));
        agDevHandle = oneDeviceData->agDevHandle;
        tdsaDeviceDataReInit(tiRoot, oneDeviceData);
        //save agDevHandle and tdPortContext
        oneDeviceData->agDevHandle = agDevHandle;
        oneDeviceData->tdPortContext = onePortContext;
        osti_memset(&(oneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t));

        tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
        TDLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink));
        TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));
        tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
        /* restart the walk from the head after unlinking */
        DeviceListList = tdsaAllShared->MainDeviceList.flink;
        tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
        if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
        {
          tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
          break;
        }
        else
        {
          tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
        }
      }
      else
      {
        DeviceListList = DeviceListList->flink;
      }
    } /* while */

    tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
    if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
    {
      tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
      TI_DBG1(("tdssReportRemovals: 2nd empty device list\n"));
      return;
    }
    else
    {
      tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
    }
    DeviceListList = tdsaAllShared->MainDeviceList.flink;
    /* second pass: abort/deregister every still-registered device on this
       port and clear its discovery flags */
    while (DeviceListList != &(tdsaAllShared->MainDeviceList))
    {
      oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
      if (oneDeviceData == agNULL)
      {
        TI_DBG1(("tdssReportRemovals: oneDeviceData is NULL!!!\n"));
        return;
      }
      TI_DBG2(("tdssReportRemovals: loop did %d\n", oneDeviceData->id));
      TI_DBG2(("tdssReportRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
      TI_DBG2(("tdssReportRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
      TI_DBG2(("tdssReportRemovals: valid %d\n", oneDeviceData->valid));
      TI_DBG2(("tdssReportRemovals: valid2 %d\n", oneDeviceData->valid2));
      TI_DBG2(("tdssReportRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached));
      TI_DBG2(("tdssReportRemovals: registered %d\n", oneDeviceData->registered));
      if ( oneDeviceData->tdPortContext == onePortContext)
      {
        TI_DBG2(("tdssReportRemovals: right portcontext pid %d\n", onePortContext->id));
        if (oneDeviceData->valid == agTRUE && oneDeviceData->registered == agTRUE)
        {
          TI_DBG2(("tdssReportRemovals: removing\n"));

          /* notify only reported devices to OS layer*/
          if ( DEVICE_IS_SSP_TARGET(oneDeviceData) ||
               DEVICE_IS_STP_TARGET(oneDeviceData) ||
               DEVICE_IS_SATA_DEVICE(oneDeviceData)
              )
          {
            removed = agTRUE;
          }

          if ( (oneDeviceData->registered == agTRUE) &&
               ( DEVICE_IS_SSP_TARGET(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)
                || DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_SMP_TARGET(oneDeviceData) )
              )
          {
            /* all targets except expanders */
            TI_DBG2(("tdssReportRemovals: calling tdsaAbortAll\n"));
            TI_DBG2(("tdssReportRemovals: did %d\n", oneDeviceData->id));
            TI_DBG2(("tdssReportRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
            TI_DBG2(("tdssReportRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
            tdsaAbortAll(tiRoot, agRoot, oneDeviceData);
          }
          else if (oneDeviceData->registered == agTRUE)
          {
            /* expanders */
            TI_DBG1(("tdssReportRemovals: calling saDeregisterDeviceHandle, did %d\n", oneDeviceData->id));
            TI_DBG2(("tdssReportRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
            TI_DBG2(("tdssReportRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
            saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, 0);
          }

          /* reset valid bit */
          oneDeviceData->valid = agFALSE;
          oneDeviceData->valid2 = agFALSE;
          oneDeviceData->registered = agFALSE;
          /* reset NumOfFCA */
          oneDeviceData->satDevData.NumOfFCA = 0;
        }
        /* called by port invalid case */
        if (flag == agTRUE)
        {
          oneDeviceData->tdPortContext = agNULL;
          TI_DBG1(("tdssReportRemovals: nulling-out tdPortContext; oneDeviceData did %d\n", oneDeviceData->id));
        }
#ifdef REMOVED /* removed */
        /* directly attached SATA -> always remove it */
        if (oneDeviceData->DeviceType == TD_SATA_DEVICE &&
            oneDeviceData->directlyAttached == agTRUE)
        {
          TI_DBG1(("tdssReportRemovals: device did %d\n", oneDeviceData->id));
          TDLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink));
          TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceLis));
          DeviceListList = tdsaAllShared->MainDeviceList.flink;
          if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
          {
            break;
          }
        }
        else
        {
          DeviceListList = DeviceListList->flink;
        }
#endif /* REMOVED */
        DeviceListList = DeviceListList->flink;
      }
      else
      {
        if (oneDeviceData->tdPortContext != agNULL)
        {
          TI_DBG2(("tdssReportRemovals: different portcontext; oneDeviceData->tdPortContext pid %d oneportcontext pid %d oneDeviceData did %d\n", oneDeviceData->tdPortContext->id, onePortContext->id, oneDeviceData->id));
        }
        else
        {
          TI_DBG1(("tdssReportRemovals: different portcontext; oneDeviceData->tdPortContext pid NULL oneportcontext pid %d oneDeviceData did %d\n", onePortContext->id, oneDeviceData->id));
        }
        DeviceListList = DeviceListList->flink;
      }
    }

    if (removed == agTRUE)
    {
      TI_DBG2(("tdssReportRemovals: removed at the end\n"));
      ostiInitiatorEvent(
                         tiRoot,
                         onePortContext->tiPortalContext,
                         agNULL,
                         tiIntrEventTypeDeviceChange,
                         tiDeviceRemoval,
                         agNULL
                         );
    }
  } /* big else */
  return;
}

/* changes valid and valid2 based on discovery type */
osGLOBAL void
tdssInternalRemovals(
  agsaRoot_t           *agRoot,
  tdsaPortContext_t    *onePortContext
  )
{
  tdsaDeviceData_t  *oneDeviceData = agNULL;
  tdList_t          *DeviceListList;
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;

  TI_DBG2(("tdssInternalRemovals: start\n"));

  tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK);
  if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
    TI_DBG1(("tdssInternalRemovals: empty device list\n"));
    return;
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK);
  }
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  while (DeviceListList != &(tdsaAllShared->MainDeviceList))
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    TI_DBG3(("tdssInternalRemovals: loop did %d\n", oneDeviceData->id));
    TI_DBG3(("tdssInternalRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
    TI_DBG3(("tdssInternalRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
    TI_DBG3(("tdssInternalRemovals: valid %d\n", oneDeviceData->valid));
    TI_DBG3(("tdssInternalRemovals: valid2 %d\n", oneDeviceData->valid2));
    TI_DBG3(("tdssInternalRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached));
    TI_DBG3(("tdssInternalRemovals: registered %d\n", oneDeviceData->registered));
    if ( oneDeviceData->tdPortContext == onePortContext)
    {
      TI_DBG3(("tdssInternalRemovals: right portcontext pid %d\n", onePortContext->id));
      /* incremental discovery clears valid2; full discovery clears valid */
      if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_INCREMENTAL_START)
      {
        TI_DBG3(("tdssInternalRemovals: incremental discovery\n"));
        oneDeviceData->valid2
= agFALSE; } else { TI_DBG3(("tdssInternalRemovals: full discovery\n")); oneDeviceData->valid = agFALSE; } DeviceListList = DeviceListList->flink; } else { if (oneDeviceData->tdPortContext != agNULL) { TI_DBG3(("tdssInternalRemovals: different portcontext; oneDeviceData->tdPortContext pid %d oneportcontext pid %d\n", oneDeviceData->tdPortContext->id, onePortContext->id)); } else { TI_DBG3(("tdssInternalRemovals: different portcontext; oneDeviceData->tdPortContext pid NULL oneportcontext pid %d\n", onePortContext->id)); } DeviceListList = DeviceListList->flink; } } return; } /* resets all valid and valid2 */ osGLOBAL void tdssDiscoveryErrorRemovals( agsaRoot_t *agRoot, tdsaPortContext_t *onePortContext ) { tdsaDeviceData_t *oneDeviceData = agNULL; tdList_t *DeviceListList; tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; TI_DBG1(("tdssDiscoveryErrorRemovals: start\n")); tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK); if (TDLIST_EMPTY(&(tdsaAllShared->MainDeviceList))) { tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK); TI_DBG1(("tdssDiscoveryErrorRemovals: empty device list\n")); return; } else { tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK); } DeviceListList = tdsaAllShared->MainDeviceList.flink; while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); TI_DBG2(("tdssDiscoveryErrorRemovals: loop did %d\n", oneDeviceData->id)); TI_DBG2(("tdssDiscoveryErrorRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG2(("tdssDiscoveryErrorRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG2(("tdssDiscoveryErrorRemovals: valid %d\n", oneDeviceData->valid)); TI_DBG2(("tdssDiscoveryErrorRemovals: valid2 %d\n", oneDeviceData->valid2)); 
    /* Continuation of tdssDiscoveryErrorRemovals(): still inside the
     * MainDeviceList walk opened on the preceding lines. */
    TI_DBG2(("tdssDiscoveryErrorRemovals: directlyAttached %d\n", oneDeviceData->directlyAttached));
    TI_DBG2(("tdssDiscoveryErrorRemovals: registered %d\n", oneDeviceData->registered));
    if ( oneDeviceData->tdPortContext == onePortContext)
    {
      TI_DBG2(("tdssDiscoveryErrorRemovals: right portcontext pid %d\n", onePortContext->id));
      oneDeviceData->valid = agFALSE;
      oneDeviceData->valid2 = agFALSE;
      /* reset NumOfFCA */
      oneDeviceData->satDevData.NumOfFCA = 0;

      if ( (oneDeviceData->registered == agTRUE) &&
           ( DEVICE_IS_SSP_TARGET(oneDeviceData) || DEVICE_IS_STP_TARGET(oneDeviceData)
             || DEVICE_IS_SATA_DEVICE(oneDeviceData) || DEVICE_IS_SMP_TARGET(oneDeviceData) )
         )
      {
        /* all targets other than expanders */
        TI_DBG2(("tdssDiscoveryErrorRemovals: calling tdsaAbortAll\n"));
        TI_DBG2(("tdssDiscoveryErrorRemovals: did %d\n", oneDeviceData->id));
        TI_DBG2(("tdssDiscoveryErrorRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        TI_DBG2(("tdssDiscoveryErrorRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        tdsaAbortAll(tiRoot, agRoot, oneDeviceData);
      }
      else if (oneDeviceData->registered == agTRUE)
      {
        /* expanders */
        TI_DBG2(("tdssDiscoveryErrorRemovals: calling saDeregisterDeviceHandle\n"));
        TI_DBG2(("tdssDiscoveryErrorRemovals: did %d\n", oneDeviceData->id));
        TI_DBG2(("tdssDiscoveryErrorRemovals: sasAddrHi 0x%08x \n", oneDeviceData->SASAddressID.sasAddressHi));
        TI_DBG2(("tdssDiscoveryErrorRemovals: sasAddrLo 0x%08x \n", oneDeviceData->SASAddressID.sasAddressLo));
        saDeregisterDeviceHandle(agRoot, agNULL, oneDeviceData->agDevHandle, 0);
      }
      oneDeviceData->registered = agFALSE;
      DeviceListList = DeviceListList->flink;
    }
    else
    {
      /* device owned by some other port context; leave it alone */
      if (oneDeviceData->tdPortContext != agNULL)
      {
        TI_DBG2(("tdssDiscoveryErrorRemovals: different portcontext; oneDeviceData->tdPortContext pid %d oneportcontext pid %d\n", oneDeviceData->tdPortContext->id, onePortContext->id));
      }
      else
      {
        TI_DBG2(("tdssDiscoveryErrorRemovals: different portcontext; oneDeviceData->tdPortContext pid NULL oneportcontext pid %d\n", onePortContext->id));
      }
      DeviceListList = DeviceListList->flink;
    }
  }
  return;
}

/*****************************************************************************
*! \brief  tdsaSASDiscoverAbort
*
*  Purpose:  This function aborts on-going discovery.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*
*  \return:
*           None
*
*  \note:
*
*****************************************************************************/
/* this called when discovery is aborted
   aborted by whom
*/
osGLOBAL void
tdsaSASDiscoverAbort(
                     tiRoot_t          *tiRoot,
                     tdsaPortContext_t *onePortContext
                     )
{
  TI_DBG2(("tdsaSASDiscoverAbort: start\n"));
  TI_DBG2(("tdsaSASDiscoverAbort: pPort=%p DONE\n", onePortContext));
  TI_DBG2(("tdsaSASDiscoverAbort: DiscoveryState %d\n", onePortContext->DiscoveryState));

  onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED;
  /* clean up expanders data structures; move to free exp when device is cleaned */
  tdsaCleanAllExp(tiRoot, onePortContext);
  /* unregister devices */
  tdssReportRemovals(onePortContext->agRoot, onePortContext, agFALSE );
}

#ifdef AGTIAPI_CTL
/* forward declaration: walks the device list and issues MODE SELECT to the
 * next eligible SSP target */
STATIC void
tdsaCTLNextDevice(
  tiRoot_t          *tiRoot,
  tdsaPortContext_t *onePortContext,
  tdIORequest_t     *tdIORequest,
  tdList_t          *DeviceList);

/*
 * tdsaCTLIOCompleted
 *
 * Completion callback for the MODE SELECT issued by tdsaCTLModeSelect():
 * dumps any returned data and advances to the next device in the list
 * (body continues on the following lines).
 */
STATIC void
tdsaCTLIOCompleted(
  agsaRoot_t      *agRoot,
  agsaIORequest_t *agIORequest,
  bit32            agIOStatus,
  bit32            agIOInfoLen,
  void            *agParam,
  bit16            sspTag,
  bit32            agOtherInfo)
{
  tiRoot_t          *tiRoot = (tiRoot_t*)
                              ((tdsaRootOsData_t*)agRoot->osData)->tiRoot;
  tdIORequestBody_t *tdIORequestBody;
  tdIORequest_t     *tdIORequest;
  tdsaDeviceData_t  *oneDeviceData;

  tdIORequest = (tdIORequest_t *)agIORequest->osData;
  tdIORequestBody = &tdIORequest->tdIORequestBody;
  tdIORequestBody->ioCompleted = agTRUE;
  tdIORequestBody->ioStarted = agFALSE;
  oneDeviceData = (tdsaDeviceData_t *)tdIORequestBody->tiDevHandle->tdData;
  TI_DBG6(("tdsaCTLIOCompleted: stat x%x len %d id %d\n", agIOStatus, agIOInfoLen,
           /* Continuation of tdsaCTLIOCompleted(): final argument of the
            * debug print opened on the preceding line. */
           oneDeviceData->id));

  //if ((agIOStatus == OSSA_IO_SUCCESS) && (agIOInfoLen == 0))
  /* SCSI command was completed OK, this is the normal path. */
  if (agIOInfoLen)
  {
    TI_DBG6(("tdsaCTLIOCompleted: SASDevAddr 0x%x / 0x%x PhyId 0x%x WARN "
             "setting CTL\n", oneDeviceData->SASAddressID.sasAddressHi,
             oneDeviceData->SASAddressID.sasAddressLo,
             oneDeviceData->SASAddressID.phyIdentifier));
    tdhexdump("tdsaCTLIOCompleted: response", (bit8 *)agParam, agIOInfoLen);
  }

  tdsaCTLNextDevice(tiRoot, oneDeviceData->tdPortContext, tdIORequest,
                    oneDeviceData->MainLink.flink);
} /* tdsaCTLIOCompleted */

/*
 * tdsaCTLModeSelect
 *
 * Builds and issues a MODE SELECT(6) carrying the Disconnect-Reconnect mode
 * page with the configured SAS connect time limit to one SSP target.
 * Returns tiSuccess/tiBusy/tiError mapped from the lower-layer saSSPStart().
 */
STATIC int
tdsaCTLModeSelect(
  tiRoot_t         *tiRoot,
  tiDeviceHandle_t *tiDeviceHandle,
  tdIORequest_t    *tdIORequest)
{
  tiIORequest_t             *tiIORequest;
  tdsaDeviceData_t          *oneDeviceData;
  agsaRoot_t                *agRoot = agNULL;
  tdsaRoot_t                *tdsaRoot = (tdsaRoot_t*)tiRoot->tdData;
  tdsaContext_t             *tdsaAllShared = (tdsaContext_t*)
                                             &tdsaRoot->tdsaAllShared;
  agsaIORequest_t           *agIORequest = agNULL;
  agsaDevHandle_t           *agDevHandle = agNULL;
  agsaSASRequestBody_t      *agSASRequestBody = agNULL;
  bit32                      tiStatus;
  bit32                      saStatus;
  tdIORequestBody_t         *tdIORequestBody;
  agsaSSPInitiatorRequest_t *agSSPInitiatorRequest;
  unsigned char             *virtAddr;
  tiSgl_t                    agSgl;
  static unsigned char       cdb[6] = { MODE_SELECT, PAGE_FORMAT, 0, 0,
                                        DR_MODE_PG_SZ };

  /* Fill in the Disconnect-Reconnect mode page payload. */
  virtAddr = (unsigned char*)tdIORequest->virtAddr;
  virtAddr[0] = DR_MODE_PG_CODE;   /* Disconnect-Reconnect mode page code */
  virtAddr[1] = DR_MODE_PG_LENGTH; /* DR Mode pg length */
  virtAddr[8] = tdsaAllShared->SASConnectTimeLimit >> 8;
  virtAddr[9] = tdsaAllShared->SASConnectTimeLimit & 0xff;

  oneDeviceData = (tdsaDeviceData_t*)tiDeviceHandle->tdData;
  TI_DBG4(("tdsaCTLModeSelect: id %d\n", oneDeviceData->id));
  agRoot = oneDeviceData->agRoot;
  agDevHandle = oneDeviceData->agDevHandle;

  tiIORequest = &tdIORequest->tiIORequest;
  tdIORequestBody = &tdIORequest->tdIORequestBody;
  //tdIORequestBody->IOCompletionFunc = tdsaCTLIOCompleted;//itdssIOCompleted;
  tdIORequestBody->tiDevHandle = tiDeviceHandle;
  tdIORequestBody->IOType.InitiatorRegIO.expDataLength = DR_MODE_PG_SZ;

  agIORequest = &tdIORequestBody->agIORequest;
  agIORequest->sdkData = agNULL; /* LL takes care of this */

  agSASRequestBody = &(tdIORequestBody->transport.SAS.agSASRequestBody);
  agSSPInitiatorRequest = &(agSASRequestBody->sspInitiatorReq);
  osti_memcpy(agSSPInitiatorRequest->sspCmdIU.cdb, cdb, 6);
  agSSPInitiatorRequest->dataLength = DR_MODE_PG_SZ;
  agSSPInitiatorRequest->firstBurstSize = 0;
  tdIORequestBody->agRequestType = AGSA_SSP_INIT_WRITE;
  tdIORequestBody->ioStarted = agTRUE;
  tdIORequestBody->ioCompleted = agFALSE;

  agSgl.lower = BIT32_TO_LEBIT32(tdIORequest->physLower32);
#if (BITS_PER_LONG > 32)
  agSgl.upper = BIT32_TO_LEBIT32(tdIORequest->physUpper32);
#else
  /* NOTE(review): 'agSgl1' looks like a typo for 'agSgl'; this branch is
   * only compiled when BITS_PER_LONG <= 32 -- confirm. */
  agSgl1.upper = 0;
#endif
  agSgl.type = BIT32_TO_LEBIT32(tiSgl);
  agSgl.len = BIT32_TO_LEBIT32(DR_MODE_PG_SZ);

  /* initializes "agsaSgl_t agSgl" of "agsaDifSSPInitiatorRequest_t" */
  tiStatus = itdssIOPrepareSGL(tiRoot, tdIORequestBody, &agSgl,
                               tdIORequest->virtAddr);
  if (tiStatus != tiSuccess)
  {
    TI_DBG1(("tdsaCTLModeSelect: can't get SGL\n"));
    ostiFreeMemory(tiRoot, tdIORequest->osMemHandle2, DR_MODE_PG_SZ);
    ostiFreeMemory(tiRoot, tdIORequest->osMemHandle, sizeof(*tdIORequest));
    return tiError;
  }

  saStatus = saSSPStart(agRoot, agIORequest,
                        tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle,
                        AGSA_SSP_INIT_WRITE, agSASRequestBody, agNULL,
                        &tdsaCTLIOCompleted);
  if (saStatus == AGSA_RC_SUCCESS)
  {
    tiStatus = tiSuccess;
    TI_DBG4(("tdsaCTLModeSelect: saSSPStart OK\n"));
  }
  else
  {
    tdIORequestBody->ioStarted = agFALSE;
    tdIORequestBody->ioCompleted = agTRUE;
    if (saStatus == AGSA_RC_BUSY)
    {
      tiStatus = tiBusy;
      TI_DBG4(("tdsaCTLModeSelect: saSSPStart busy\n"));
    }
    else
    {
      tiStatus = tiError;
      TI_DBG4(("tdsaCTLModeSelect: saSSPStart Error\n"));
    }
    /* could not start this device's I/O; move on to the next device */
    tdsaCTLNextDevice(tiRoot, oneDeviceData->tdPortContext, tdIORequest,
                      oneDeviceData->MainLink.flink);
  }

  return tiStatus;
} /* tdsaCTLModeSelect */

/*
 * tdsaCTLNextDevice (signature continues on the following lines)
 */
STATIC void
tdsaCTLNextDevice(
  tiRoot_t          *tiRoot,
  tdsaPortContext_t
                    /* Continuation of the tdsaCTLNextDevice() signature
                     * opened on the preceding line. */
                    *onePortContext,
  tdIORequest_t     *tdIORequest,
  tdList_t          *DeviceList)
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *)tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdsaDeviceData_t  *oneDeviceData;
  tiIntrEventType_t  eventType;
  bit32              eventStatus;
  int                rc;

  /*
   * From the device list, returns only valid devices
   */
  for (; DeviceList && DeviceList != &(tdsaAllShared->MainDeviceList);
       DeviceList = DeviceList->flink)
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceList);
    TI_DBG6(("tdsaCTLNextDevice: devHandle %p\n",
             &(oneDeviceData->tiDeviceHandle)));
    if (oneDeviceData->tdPortContext != onePortContext)
      continue;
    /* only not-yet-visited, registered SSP targets (not initiators) */
    if ((oneDeviceData->discovered == agFALSE) &&
        (oneDeviceData->registered == agTRUE) &&
        DEVICE_IS_SSP_TARGET(oneDeviceData) &&
        !DEVICE_IS_SSP_INITIATOR(oneDeviceData))
    {
      oneDeviceData->discovered = agTRUE;
      rc = tdsaCTLModeSelect(tiRoot, &oneDeviceData->tiDeviceHandle,
                             tdIORequest);
      TI_DBG1(("tdsaCTLNextDevice: ModeSelect ret %d\n", rc));
      return;
    }
  }
  TI_DBG2(("tdsaCTLNextDevice: no more devices found\n"));

  eventType = tdIORequest->eventType;
  eventStatus = tdIORequest->eventStatus;
  /* no more devices, free the memory */
  ostiFreeMemory(tiRoot, tdIORequest->osMemHandle2, DR_MODE_PG_SZ);
  ostiFreeMemory(tiRoot, tdIORequest->osMemHandle, sizeof(*tdIORequest));
  /* send Discovery Done event */
  ostiInitiatorEvent(tiRoot, onePortContext->tiPortalContext, agNULL,
                     eventType, eventStatus, agNULL);
} /* tdsaCTLNextDevice */

/*
 * tdsaCTLSet
 *
 * Entry point: once discovery has completed, allocates one shared tdIORequest
 * plus a DMA-able Disconnect-Reconnect mode page buffer, then kicks off the
 * MODE SELECT chain over all eligible devices via tdsaCTLNextDevice().  The
 * Discovery Done event (eventType/eventStatus) is stashed in the request and
 * replayed when the chain finishes.
 */
osGLOBAL void
tdsaCTLSet(
  tiRoot_t          *tiRoot,
  tdsaPortContext_t *onePortContext,
  tiIntrEventType_t  eventType,
  bit32              eventStatus)
{
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *)tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdIORequest_t     *tdIORequest;
  tdIORequestBody_t *tdIORequestBody;
  tiIORequest_t     *tiIORequest;
  bit32              memAllocStatus;
  void              *osMemHandle;
  bit32              physUpper32;
  bit32              physLower32;

  TI_DBG2(("tdsaCTLSet: tiPortalContext pid %d etyp %x stat %x\n",
           onePortContext->id, eventType, eventStatus));

  if (onePortContext->DiscoveryState != ITD_DSTATE_COMPLETED)
  {
    TI_DBG1(("tdsaCTLSet: discovery not completed\n"));
    return;
  }

  /* use the same memory for all valid devices */
  memAllocStatus = ostiAllocMemory(tiRoot, &osMemHandle,
                                   (void **)&tdIORequest, &physUpper32,
                                   &physLower32, 8, sizeof(*tdIORequest),
                                   agTRUE);
  if (memAllocStatus != tiSuccess || tdIORequest == agNULL)
  {
    TI_DBG1(("tdsaCTLSet: ostiAllocMemory failed\n"));
    return;// tiError;
  }
  osti_memset(tdIORequest, 0, sizeof(*tdIORequest));

  tdIORequest->osMemHandle = osMemHandle;
  tdIORequest->eventType = eventType;     /* replayed when chain finishes */
  tdIORequest->eventStatus = eventStatus;

  tiIORequest = &tdIORequest->tiIORequest;
  tdIORequestBody = &tdIORequest->tdIORequestBody;
  /* save context if we need to abort later */
  tiIORequest->tdData = tdIORequestBody;
  tdIORequestBody->IOCompletionFunc = NULL;//itdssIOCompleted;
  tdIORequestBody->tiIORequest = tiIORequest;
  tdIORequestBody->IOType.InitiatorRegIO.expDataLength = 16;
  tdIORequestBody->agIORequest.osData = (void *)tdIORequest; //tdIORequestBody;

  memAllocStatus = ostiAllocMemory(tiRoot, &tdIORequest->osMemHandle2,
                                   (void **)&tdIORequest->virtAddr,
                                   &tdIORequest->physUpper32,
                                   &tdIORequest->physLower32,
                                   8, DR_MODE_PG_SZ, agFALSE);
  /* NOTE(review): this re-checks tdIORequest (already known non-NULL) rather
   * than the buffer just allocated into tdIORequest->virtAddr -- confirm. */
  if (memAllocStatus != tiSuccess || tdIORequest == agNULL)
  {
    TI_DBG1(("tdsaCTLSet: ostiAllocMemory noncached failed\n"));
    ostiFreeMemory(tiRoot, tdIORequest->osMemHandle, sizeof(*tdIORequest));
    return;// tiError;
  }
  osti_memset(tdIORequest->virtAddr, 0, DR_MODE_PG_SZ);

  tdsaCTLNextDevice(tiRoot, onePortContext, tdIORequest,
                    tdsaAllShared->MainDeviceList.flink);
} /* tdsaCTLSet*/
#endif

/*****************************************************************************
*! \brief  tdsaSASDiscoverDone
*
*  Purpose:  This function called to finish up SAS discovery.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*
*  \return:
*           None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaSASDiscoverDone(
                    tiRoot_t          *tiRoot,
                    tdsaPortContext_t *onePortContext,
                    bit32              flag
                    )
{
#ifndef SATA_ENABLE
  tdsaRoot_t    *tdsaRoot = (tdsaRoot_t *)tiRoot->tdData;
  tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
#endif

  TI_DBG3(("tdsaSASDiscoverDone: start\n"));
  TI_DBG3(("tdsaSASDiscoverDone: pPort=%p DONE\n", onePortContext));
  TI_DBG3(("tdsaSASDiscoverDone: pid %d\n", onePortContext->id));

  /* Set discovery status */
  onePortContext->discovery.status = DISCOVERY_SAS_DONE;

#ifdef TD_INTERNAL_DEBUG /* debugging only */
  TI_DBG3(("tdsaSASDiscoverDone: BEFORE\n"));
  tdsaDumpAllExp(tiRoot, onePortContext, agNULL);
  tdsaDumpAllUpExp(tiRoot, onePortContext, agNULL);
#endif

  /* clean up expanders data structures; move to free exp when device is cleaned */
  tdsaCleanAllExp(tiRoot, onePortContext);

#ifdef TD_INTERNAL_DEBUG /* debugging only */
  TI_DBG3(("tdsaSASDiscoverDone: AFTER\n"));
  tdsaDumpAllExp(tiRoot, onePortContext, agNULL);
  tdsaDumpAllUpExp(tiRoot, onePortContext, agNULL);
#endif

  /* call back to notify discovery is done */
  /* SATA is NOT enabled */
#ifndef SATA_ENABLE
  if (onePortContext->discovery.SeenBC == agTRUE)
  {
    /* a broadcast change arrived during discovery: run discovery again
     * instead of reporting completion */
    TI_DBG3(("tdsaSASDiscoverDone: broadcast change; discover again\n"));
    tdssInternalRemovals(onePortContext->agRoot, onePortContext );
    /* processed broadcast change */
    onePortContext->discovery.SeenBC = agFALSE;
    if (tdsaAllShared->ResetInDiscovery != 0 &&
        onePortContext->discovery.ResetTriggerred == agTRUE)
    {
      TI_DBG2(("tdsaSASDiscoverDone: tdsaBCTimer\n"));
      tdsaBCTimer(tiRoot, onePortContext);
    }
    else
    {
      tdsaDiscover(
                   tiRoot,
                   onePortContext,
                   TDSA_DISCOVERY_TYPE_SAS,
                   TDSA_DISCOVERY_OPTION_INCREMENTAL_START
                  );
    }
  }
  else
  {
    onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED;
    if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START)
    {
      if (flag == tiSuccess)
      {
#ifdef AGTIAPI_CTL
        /* optionally push the connect-time-limit mode page first; the
         * tiDiscOK event is then sent when that chain completes */
        if (tdsaAllShared->SASConnectTimeLimit)
          tdsaCTLSet(tiRoot, onePortContext, tiIntrEventTypeDiscovery,
                     tiDiscOK);
        else
#endif
          ostiInitiatorEvent(
                             tiRoot,
                             onePortContext->tiPortalContext,
                             agNULL,
                             tiIntrEventTypeDiscovery,
                             tiDiscOK,
                             agNULL
                             );
      }
      else
      {
        TI_DBG1(("tdsaSASDiscoverDone: discovery failed\n"));
        tdssDiscoveryErrorRemovals(onePortContext->agRoot, onePortContext );
        ostiInitiatorEvent(
                           tiRoot,
                           onePortContext->tiPortalContext,
                           agNULL,
                           tiIntrEventTypeDiscovery,
                           tiDiscFailed,
                           agNULL
                           );
      }
    }
    else
    {
      /* incremental discovery: report per-device deltas instead */
      if (flag == tiSuccess)
      {
        tdssReportChanges(onePortContext->agRoot, onePortContext );
      }
      else
      {
        tdssReportRemovals(onePortContext->agRoot, onePortContext, agFALSE );
      }
    }
  }
#ifdef TBD
  /* ACKing BC */
  tdsaAckBC(tiRoot, onePortContext);
#endif
#endif

#ifdef SATA_ENABLE
  if (flag == tiSuccess)
  {
    TI_DBG3(("tdsaSASDiscoverDone: calling SATA discovery\n"));
    /*
      tdsaSATAFullDiscover() or tdsaincrementalDiscover()
      call sata discover
      when sata discover is done, call ostiInitiatorEvent
    */
    if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START)
    {
      TI_DBG3(("tdsaSASDiscoverDone: calling FULL SATA discovery\n"));
      tdsaDiscover(
                   tiRoot,
                   onePortContext,
                   AG_SA_DISCOVERY_TYPE_SATA,
                   TDSA_DISCOVERY_OPTION_FULL_START
                   );
    }
    else
    {
      TI_DBG3(("tdsaSASDiscoverDone: calling INCREMENTAL SATA discovery\n"));
      tdsaDiscover(
                   tiRoot,
                   onePortContext,
                   AG_SA_DISCOVERY_TYPE_SATA,
                   TDSA_DISCOVERY_OPTION_INCREMENTAL_START
                   );
    }
  }
  else
  {
    /* error case */
    TI_DBG1(("tdsaSASDiscoverDone: Error; clean up\n"));
    tdssDiscoveryErrorRemovals(onePortContext->agRoot, onePortContext );
    onePortContext->discovery.SeenBC = agFALSE;
    onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED;
    ostiInitiatorEvent(
                       tiRoot,
                       onePortContext->tiPortalContext,
                       agNULL,
                       tiIntrEventTypeDiscovery,
                       tiDiscFailed,
                       agNULL
                       );
  }
#endif
  return;
}

//temp only for testing
/*
 * tdsaReportManInfoSend
 *
 * Sends a REPORT MANUFACTURER INFORMATION SMP request to the given device.
 * (The debug string below is split onto the following lines in this file.)
 */
osGLOBAL void
tdsaReportManInfoSend(
                      tiRoot_t         *tiRoot,
                      tdsaDeviceData_t *oneDeviceData
                      )
{
  agsaRoot_t *agRoot;

  agRoot = oneDeviceData->agRoot;

  TI_DBG2(("tdsaReportManInfoSend:
start\n")); /* end of the tdsaReportManInfoSend() debug print opened above */

  tdSMPStart(
             tiRoot,
             agRoot,
             oneDeviceData,
             SMP_REPORT_MANUFACTURE_INFORMATION,
             agNULL,
             0,
             AGSA_SMP_INIT_REQ,
             agNULL,
             0
             );

  return;
}

/*
 * tdsaReportManInfoRespRcvd
 *
 * Handles the REPORT MANUFACTURER INFORMATION SMP response; regardless of the
 * function result it bumps the retry counter and re-arms the discovery timer
 * until DISCOVERY_RETRIES is reached.
 */
osGLOBAL void
tdsaReportManInfoRespRcvd(
                          tiRoot_t             *tiRoot,
                          agsaRoot_t           *agRoot,
                          tdsaDeviceData_t     *oneDeviceData,
                          tdssSMPFrameHeader_t *frameHeader,
                          agsaFrameHandle_t     frameHandle
                          )
{
  tdsaPortContext_t *onePortContext;
  tdsaDiscovery_t   *discovery;

  TI_DBG2(("tdsaReportManInfoRespRcvd: start\n"));
  TI_DBG2(("tdsaReportManInfoRespRcvd: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  TI_DBG2(("tdsaReportManInfoRespRcvd: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));

  onePortContext = oneDeviceData->tdPortContext;
  discovery = &(onePortContext->discovery);

  if (frameHeader->smpFunctionResult == SMP_FUNCTION_ACCEPTED)
  {
    TI_DBG2(("tdsaReportManInfoRespRcvd: SMP accepted\n"));
  }
  else
  {
    TI_DBG1(("tdsaReportManInfoRespRcvd: SMP NOT accepted; fn result 0x%x\n", frameHeader->smpFunctionResult));
  }

  TI_DBG2(("tdsaReportManInfoRespRcvd: discovery retries %d\n", discovery->retries));
  discovery->retries++;

  if (discovery->retries >= DISCOVERY_RETRIES)
  {
    TI_DBG1(("tdsaReportManInfoRespRcvd: retries are over\n"));
    discovery->retries = 0;
    /* failed the discovery */
  }
  else
  {
    TI_DBG1(("tdsaReportManInfoRespRcvd: keep retrying\n"));
    // start timer
    tdsaDiscoveryTimer(tiRoot, onePortContext, oneDeviceData);
  }

  return;
}
//end temp only for testing

/*****************************************************************************
*! \brief  tdsaReportGeneralSend
*
*  Purpose:  This function sends Report General SMP to a device.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
*  \return:
*           None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaReportGeneralSend(
                      tiRoot_t         *tiRoot,
                      tdsaDeviceData_t *oneDeviceData
                      )
{
  agsaRoot_t *agRoot;

  agRoot = oneDeviceData->agRoot;

  TI_DBG3(("tdsaReportGeneralSend: start\n"));

  tdSMPStart(
             tiRoot,
             agRoot,
             oneDeviceData,
             SMP_REPORT_GENERAL,
             agNULL,
             0,
             AGSA_SMP_INIT_REQ,
             agNULL,
             0
             );

  return;
}

/*****************************************************************************
*! \brief  tdsaReportGeneralRespRcvd
*
*  Purpose:  This function processes Report General response.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   agRoot:          Pointer to chip/driver Instance.
*  \param   oneDeviceData:   Pointer to the device data.
*  \param   frameHeader:     Pointer to SMP frame header.
*  \param   frameHandle:     A Handle used to refer to the response frame
*
*  \return:
*           None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaReportGeneralRespRcvd(
                          tiRoot_t             *tiRoot,
                          agsaRoot_t           *agRoot,
                          agsaIORequest_t      *agIORequest,
                          tdsaDeviceData_t     *oneDeviceData,
                          tdssSMPFrameHeader_t *frameHeader,
                          agsaFrameHandle_t     frameHandle
                          )
{
  smpRespReportGeneral_t  tdSMPReportGeneralResp;
  smpRespReportGeneral_t *ptdSMPReportGeneralResp;
  tdsaExpander_t         *oneExpander;
  tdsaPortContext_t      *onePortContext;
  tdsaDiscovery_t        *discovery;
#ifdef REMOVED
  bit32                   i;
#endif
#ifndef DIRECT_SMP
  tdssSMPRequestBody_t   *tdSMPRequestBody;
  tdSMPRequestBody = (tdssSMPRequestBody_t *)agIORequest->osData;
#endif

  TI_DBG3(("tdsaReportGeneralRespRcvd: start\n"));
  TI_DBG3(("tdsaReportGeneralRespRcvd: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  TI_DBG3(("tdsaReportGeneralRespRcvd: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));

  ptdSMPReportGeneralResp = &tdSMPReportGeneralResp;
  osti_memset(&tdSMPReportGeneralResp, 0, sizeof(smpRespReportGeneral_t));
#ifdef DIRECT_SMP
  /* response payload starts 4 bytes in, past the SMP frame header */
  saFrameReadBlock(agRoot,
                   /* Continuation of tdsaReportGeneralRespRcvd(): the
                    * saFrameReadBlock() call opened on the preceding line. */
                   frameHandle,
                   4,
                   ptdSMPReportGeneralResp,
                   sizeof(smpRespReportGeneral_t));
#else
  saFrameReadBlock(agRoot,
                   tdSMPRequestBody->IndirectSMPResp,
                   4,
                   ptdSMPReportGeneralResp,
                   sizeof(smpRespReportGeneral_t));
#endif

  //tdhexdump("tdsaReportGeneralRespRcvd", (bit8 *)ptdSMPReportGeneralResp, sizeof(smpRespReportGeneral_t));

#ifndef DIRECT_SMP
  /* the indirect SMP request/response buffers are no longer needed */
  ostiFreeMemory(
                 tiRoot,
                 tdSMPRequestBody->IndirectSMPReqosMemHandle,
                 tdSMPRequestBody->IndirectSMPReqLen
                );
  ostiFreeMemory(
                 tiRoot,
                 tdSMPRequestBody->IndirectSMPResposMemHandle,
                 tdSMPRequestBody->IndirectSMPRespLen
                );
#endif

  onePortContext = oneDeviceData->tdPortContext;
  discovery = &(onePortContext->discovery);

  if (onePortContext->valid == agFALSE)
  {
    /* port went away while the SMP was in flight */
    TI_DBG1(("tdsaReportGeneralRespRcvd: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return;
  }

  if (frameHeader->smpFunctionResult == SMP_FUNCTION_ACCEPTED)
  {
    oneDeviceData->numOfPhys = (bit8) ptdSMPReportGeneralResp->numOfPhys;
    oneExpander = oneDeviceData->tdExpander;
    oneExpander->routingIndex = (bit16) REPORT_GENERAL_GET_ROUTEINDEXES(ptdSMPReportGeneralResp);
#ifdef REMOVED
    for ( i = 0; i < oneDeviceData->numOfPhys; i++ )
    {
      oneExpander->currentIndex[i] = 0;
    }
#endif
    oneExpander->configReserved = 0;
    oneExpander->configRouteTable = REPORT_GENERAL_IS_CONFIGURABLE(ptdSMPReportGeneralResp) ? 1 : 0;
    oneExpander->configuring = REPORT_GENERAL_IS_CONFIGURING(ptdSMPReportGeneralResp) ? 1 : 0;
    TI_DBG3(("tdsaReportGeneralRespRcvd: oneExpander=%p numberofPhys=0x%x RoutingIndex=0x%x\n", oneExpander, oneDeviceData->numOfPhys, oneExpander->routingIndex));
    TI_DBG3(("tdsaReportGeneralRespRcvd: configRouteTable=%d configuring=%d\n", oneExpander->configRouteTable, oneExpander->configuring));
    if (oneExpander->configuring == 1)
    {
      /* expander is still self-configuring: retry later via the timer */
      discovery->retries++;
      if (discovery->retries >= DISCOVERY_RETRIES)
      {
        TI_DBG1(("tdsaReportGeneralRespRcvd: retries are over\n"));
        discovery->retries = 0;
        /* failed the discovery */
        tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
      }
      else
      {
        TI_DBG1(("tdsaReportGeneralRespRcvd: keep retrying\n"));
        // start timer for sending ReportGeneral
        tdsaDiscoveryTimer(tiRoot, onePortContext, oneDeviceData);
      }
    }
    else
    {
      discovery->retries = 0;
      tdsaDiscoverSend(tiRoot, oneDeviceData);
    }
  }
  else
  {
    TI_DBG1(("tdsaReportGeneralRespRcvd: SMP failed; fn result 0x%x; stopping discovery\n", frameHeader->smpFunctionResult));
    tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
  }
  return;
}

/*****************************************************************************
*! \brief  tdsaDiscoverSend
*
*  Purpose:  This function sends Discovery SMP to a device.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
*  \return:
*           None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaDiscoverSend(
                 tiRoot_t         *tiRoot,
                 tdsaDeviceData_t *oneDeviceData
                 )
{
  agsaRoot_t       *agRoot;
  tdsaExpander_t   *oneExpander;
  smpReqDiscover_t  smpDiscoverReq;

  TI_DBG3(("tdsaDiscoverSend: start\n"));
  TI_DBG3(("tdsaDiscoverSend: device %p did %d\n", oneDeviceData, oneDeviceData->id));
  agRoot = oneDeviceData->agRoot;
  oneExpander = oneDeviceData->tdExpander;
  TI_DBG3(("tdsaDiscoverSend: phyID 0x%x\n", oneExpander->discoveringPhyId));

  osti_memset(&smpDiscoverReq, 0, sizeof(smpReqDiscover_t));
  smpDiscoverReq.reserved1 = 0;
  smpDiscoverReq.reserved2 = 0;
  /* query the phy currently being walked on this expander */
  smpDiscoverReq.phyIdentifier = oneExpander->discoveringPhyId;
  smpDiscoverReq.reserved3 = 0;

  tdSMPStart(
             tiRoot,
             agRoot,
             oneDeviceData,
             SMP_DISCOVER,
             (bit8 *)&smpDiscoverReq,
             sizeof(smpReqDiscover_t),
             AGSA_SMP_INIT_REQ,
             agNULL,
             0
             );
  return;
}

/*****************************************************************************
*! \brief  tdsaDiscoverRespRcvd
*
*  Purpose:  This function processes Discovery response.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   agRoot:          Pointer to chip/driver Instance.
*  \param   oneDeviceData:   Pointer to the device data.
*  \param   frameHeader:     Pointer to SMP frame header.
*  \param   frameHandle:     A Handle used to refer to the response frame
*
*  \return:
*           None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaDiscoverRespRcvd(
                     tiRoot_t             *tiRoot,
                     agsaRoot_t           *agRoot,
                     agsaIORequest_t      *agIORequest,
                     tdsaDeviceData_t     *oneDeviceData,
                     tdssSMPFrameHeader_t *frameHeader,
                     agsaFrameHandle_t     frameHandle
                     )
{
  smpRespDiscover_t    *ptdSMPDiscoverResp;
  tdsaPortContext_t    *onePortContext;
  tdsaExpander_t       *oneExpander;
  tdsaDiscovery_t      *discovery;
#ifndef DIRECT_SMP
  tdssSMPRequestBody_t *tdSMPRequestBody;
#endif

  TI_DBG3(("tdsaDiscoverRespRcvd: start\n"));
  TI_DBG3(("tdsaDiscoverRespRcvd: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi));
  TI_DBG3(("tdsaDiscoverRespRcvd: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo));

  onePortContext = oneDeviceData->tdPortContext;
  oneExpander = oneDeviceData->tdExpander;
  discovery = &(onePortContext->discovery);
#ifndef DIRECT_SMP
  tdSMPRequestBody = (tdssSMPRequestBody_t *)agIORequest->osData;
#endif

  if (onePortContext->valid == agFALSE)
  {
    /* port went away while the SMP was in flight */
    TI_DBG1(("tdsaDiscoverRespRcvd: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return;
  }

  /* the parsed response lives in the per-port discovery context */
  ptdSMPDiscoverResp = &(discovery->SMPDiscoverResp);
#ifdef DIRECT_SMP
  saFrameReadBlock(agRoot, frameHandle, 4, ptdSMPDiscoverResp, sizeof(smpRespDiscover_t));
#else
  saFrameReadBlock(agRoot, tdSMPRequestBody->IndirectSMPResp, 4, ptdSMPDiscoverResp, sizeof(smpRespDiscover_t));
#endif
  //tdhexdump("tdsaDiscoverRespRcvd", (bit8 *)ptdSMPDiscoverResp, sizeof(smpRespDiscover_t));
#ifndef DIRECT_SMP
  ostiFreeMemory(
                 tiRoot,
                 tdSMPRequestBody->IndirectSMPReqosMemHandle,
                 tdSMPRequestBody->IndirectSMPReqLen
                );
  ostiFreeMemory(
                 tiRoot,
                 tdSMPRequestBody->IndirectSMPResposMemHandle,
                 tdSMPRequestBody->IndirectSMPRespLen
                );
#endif

  if ( frameHeader->smpFunctionResult == SMP_FUNCTION_ACCEPTED)
  {
    /* dispatch on the current discovery phase */
    if ( onePortContext->discovery.status == DISCOVERY_UP_STREAM)
    {
      tdsaSASUpStreamDiscoverExpanderPhy(tiRoot,
                                         /* Continuation of tdsaDiscoverRespRcvd():
                                          * arguments of the call opened on the
                                          * preceding line. */
                                         onePortContext, oneExpander, ptdSMPDiscoverResp);
    }
    else if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM)
    {
      tdsaSASDownStreamDiscoverExpanderPhy(tiRoot, onePortContext, oneExpander, ptdSMPDiscoverResp);
    }
    else if (onePortContext->discovery.status == DISCOVERY_CONFIG_ROUTING)
    {
      /* not done with configuring routing
         1. set the timer
         2. on timer expiration, call tdsaSASDownStreamDiscoverExpanderPhy()
      */
      TI_DBG2(("tdsaDiscoverRespRcvd: still configuring routing; setting timer\n"));
      TI_DBG2(("tdsaDiscoverRespRcvd: onePortContext %p oneDeviceData %p ptdSMPDiscoverResp %p\n", onePortContext, oneDeviceData, ptdSMPDiscoverResp));
      tdhexdump("tdsaDiscoverRespRcvd", (bit8*)ptdSMPDiscoverResp, sizeof(smpRespDiscover_t));
      tdsaConfigureRouteTimer(tiRoot, onePortContext, oneExpander, ptdSMPDiscoverResp);
    }
    else
    {
      /* nothing */
    }
  }
  else if (frameHeader->smpFunctionResult == PHY_VACANT)
  {
    /* vacant phy: skip it and continue the walk in the current direction */
    TI_DBG3(("tdsaDiscoverRespRcvd: smpFunctionResult is PHY_VACANT, phyid %d\n", oneExpander->discoveringPhyId));
    if ( onePortContext->discovery.status == DISCOVERY_UP_STREAM)
    {
      tdsaSASUpStreamDiscoverExpanderPhySkip(tiRoot, onePortContext, oneExpander);
    }
    else if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM)
    {
      tdsaSASDownStreamDiscoverExpanderPhySkip(tiRoot, onePortContext, oneExpander);
    }
    else if (onePortContext->discovery.status == DISCOVERY_CONFIG_ROUTING)
    {
      /* not done with configuring routing
         1. set the timer
         2. on timer expiration, call tdsaSASDownStreamDiscoverExpanderPhy()
      */
      TI_DBG1(("tdsaDiscoverRespRcvd: still configuring routing; setting timer\n"));
      TI_DBG1(("tdsaDiscoverRespRcvd: onePortContext %p oneDeviceData %p ptdSMPDiscoverResp %p\n", onePortContext, oneDeviceData, ptdSMPDiscoverResp));
      tdhexdump("tdsaDiscoverRespRcvd", (bit8*)ptdSMPDiscoverResp, sizeof(smpRespDiscover_t));
      tdsaConfigureRouteTimer(tiRoot, onePortContext, oneExpander, ptdSMPDiscoverResp);
    }
  }
  else
  {
    TI_DBG1(("tdsaDiscoverRespRcvd: Discovery Error SMP function return result error=%x\n", frameHeader->smpFunctionResult));
    tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
  }
  return;
}

/*****************************************************************************
*! \brief  tdsaSASUpStreamDiscoverExpanderPhy
*
*  Purpose:  This function actually does upstream traverse and finds out detailed
*            information about topology.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneExpander:     Pointer to the expander data.
*  \param   pDiscoverResp:   Pointer to the Discovery SMP response.
* * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSASUpStreamDiscoverExpanderPhy( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaExpander_t *oneExpander, smpRespDiscover_t *pDiscoverResp ) { tdsaDeviceData_t *oneDeviceData; tdsaDeviceData_t *AttachedDevice = agNULL; tdsaExpander_t *AttachedExpander; agsaSASIdentify_t sasIdentify; bit8 connectionRate; bit32 attachedSasHi, attachedSasLo; tdsaSASSubID_t agSASSubID; TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: start\n")); if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return; } oneDeviceData = oneExpander->tdDevice; TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: Phy #%d of SAS %08x-%08x\n", oneExpander->discoveringPhyId, oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG3((" Attached device: %s\n", ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 0 ? "No Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 1 ? "End Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 2 ? 
"Edge Expander" : "Fanout Expander"))))); if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { TI_DBG3((" SAS address : %08x-%08x\n", DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp), DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp))); TI_DBG3((" SSP Target : %d\n", DISCRSP_IS_SSP_TARGET(pDiscoverResp)?1:0)); TI_DBG3((" STP Target : %d\n", DISCRSP_IS_STP_TARGET(pDiscoverResp)?1:0)); TI_DBG3((" SMP Target : %d\n", DISCRSP_IS_SMP_TARGET(pDiscoverResp)?1:0)); TI_DBG3((" SATA DEVICE : %d\n", DISCRSP_IS_SATA_DEVICE(pDiscoverResp)?1:0)); TI_DBG3((" SSP Initiator : %d\n", DISCRSP_IS_SSP_INITIATOR(pDiscoverResp)?1:0)); TI_DBG3((" STP Initiator : %d\n", DISCRSP_IS_STP_INITIATOR(pDiscoverResp)?1:0)); TI_DBG3((" SMP Initiator : %d\n", DISCRSP_IS_SMP_INITIATOR(pDiscoverResp)?1:0)); TI_DBG3((" Phy ID : %d\n", pDiscoverResp->phyIdentifier)); TI_DBG3((" Attached Phy ID: %d\n", pDiscoverResp->attachedPhyIdentifier)); } /* end for debugging */ /* for debugging */ if (oneExpander->discoveringPhyId != pDiscoverResp->phyIdentifier) { TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy: !!! 
Incorrect SMP response !!!\n")); TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy: Request PhyID #%d Response PhyID #%d\n", oneExpander->discoveringPhyId, pDiscoverResp->phyIdentifier)); tdhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover_t)); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); return; } /* saving routing attribute for non self-configuring expanders */ oneExpander->routingAttribute[pDiscoverResp->phyIdentifier] = DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp); /* for debugging */ // dumpRoutingAttributes(tiRoot, oneExpander, pDiscoverResp->phyIdentifier); if ( oneDeviceData->SASSpecDeviceType == SAS_FANOUT_EXPANDER_DEVICE ) { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: SA_SAS_DEV_TYPE_FANOUT_EXPANDER\n")); if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy: **** Topology Error subtractive routing on fanout expander device\n")); /* discovery error */ onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* (2.1.3) discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); return; } } else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: SA_SAS_DEV_TYPE_EDGE_EXPANDER\n")); if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { /* Setup sasIdentify for the attached device */ sasIdentify.phyIdentifier = pDiscoverResp->phyIdentifier; sasIdentify.deviceType_addressFrameType = (bit8)(pDiscoverResp->attachedDeviceType & 0x70); sasIdentify.initiator_ssp_stp_smp = pDiscoverResp->attached_Ssp_Stp_Smp_Sata_Initiator; sasIdentify.target_ssp_stp_smp = pDiscoverResp->attached_SataPS_Ssp_Stp_Smp_Sata_Target; *(bit32*)sasIdentify.sasAddressHi = 
*(bit32*)pDiscoverResp->attachedSasAddressHi; *(bit32*)sasIdentify.sasAddressLo = *(bit32*)pDiscoverResp->attachedSasAddressLo; /* incremental discovery */ agSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); agSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); agSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; agSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; attachedSasHi = DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp); attachedSasLo = DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp); /* If the phy has subtractive routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: SA_SAS_ROUTING_SUBTRACTIVE\n")); /* Setup upstream phys */ tdsaSASExpanderUpStreamPhyAdd(tiRoot, oneExpander, (bit8) pDiscoverResp->attachedPhyIdentifier); /* If the expander already has an upsteam device set up */ if (oneExpander->hasUpStreamDevice == agTRUE) { /* If the sas address doesn't match */ if ( ((oneExpander->upStreamSASAddressHi != attachedSasHi) || (oneExpander->upStreamSASAddressLo != attachedSasLo)) && (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE || DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* TODO: discovery error, callback */ TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy: **** Topology Error subtractive routing error - inconsistent SAS address\n")); /* call back to notify discovery error */ onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } else { /* Setup SAS address for up stream device */ 
oneExpander->hasUpStreamDevice = agTRUE; oneExpander->upStreamSASAddressHi = attachedSasHi; oneExpander->upStreamSASAddressLo = attachedSasLo; if ( (onePortContext->sasLocalAddressHi != attachedSasHi) || (onePortContext->sasLocalAddressLo != attachedSasLo) ) { /* Find the device from the discovered list */ AttachedDevice = tdsaPortSASDeviceFind(tiRoot, onePortContext, attachedSasLo, attachedSasHi); /* If the device has been discovered before */ if ( AttachedDevice != agNULL) { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: Seen This Device Before\n")); /* If attached device is an edge expander */ if ( AttachedDevice->SASSpecDeviceType == SAS_EDGE_EXPANDER_DEVICE) { /* The attached device is an expander */ AttachedExpander = AttachedDevice->tdExpander; /* If the two expanders are the root of the two edge expander sets */ if ( (AttachedExpander->upStreamSASAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo)) && (AttachedExpander->upStreamSASAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo)) ) { /* Setup upstream expander for the pExpander */ oneExpander->tdUpStreamExpander = AttachedExpander; } /* If the two expanders are not the root of the two edge expander sets */ else { /* TODO: loop found, discovery error, callback */ TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy: **** Topology Error loop detection\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } /* If attached device is not an edge expander */ else { /*TODO: should not happen, ASSERT */ TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy, *** Attached Device is not Edge. 
Confused!!\n")); } } /* If the device has not been discovered before */ else { /* Add the device */ TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: New device\n")); /* read minimum rate from the configuration onePortContext->LinkRate is SPC's local link rate */ connectionRate = (bit8)(MIN(onePortContext->LinkRate, DISCRSP_GET_LINKRATE(pDiscoverResp))); TI_DBG3(("siSASUpStreamDiscoverExpanderPhy: link rate 0x%x\n", onePortContext->LinkRate)); TI_DBG3(("siSASUpStreamDiscoverExpanderPhy: negotiatedPhyLinkRate 0x%x\n", DISCRSP_GET_LINKRATE(pDiscoverResp))); TI_DBG3(("siSASUpStreamDiscoverExpanderPhy: connectionRate 0x%x\n", connectionRate)); //hhhhhhhh if (DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { /* incremental discovery */ if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, STP_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = tdsaFindRegNValid( onePortContext->agRoot, onePortContext, &agSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, STP_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } } } else { /* incremental discovery */ if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = tdsaFindRegNValid( onePortContext->agRoot, onePortContext, &agSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, 
agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } } } /* If the device is added successfully */ if ( AttachedDevice != agNULL) { /* (3.1.2.3.2.3.2.1) callback about new device */ if ( DISCRSP_IS_SSP_TARGET(pDiscoverResp) || DISCRSP_IS_SSP_INITIATOR(pDiscoverResp) || DISCRSP_IS_SMP_INITIATOR(pDiscoverResp) || DISCRSP_IS_SMP_INITIATOR(pDiscoverResp) ) { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: Found SSP/SMP SAS %08x-%08x\n", attachedSasHi, attachedSasLo)); } else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: Found a SAS STP device.\n")); } /* If the attached device is an expander */ if ( (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) || (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* Allocate an expander data structure */ AttachedExpander = tdssSASDiscoveringExpanderAlloc( tiRoot, onePortContext, AttachedDevice ); TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: Found expander=%p\n", AttachedExpander)); /* If allocate successfully */ if ( AttachedExpander != agNULL) { /* Add the pAttachedExpander to discovering list */ tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, AttachedExpander); /* Setup upstream expander for the pExpander */ oneExpander->tdUpStreamExpander = AttachedExpander; } /* If failed to allocate */ else { TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy, Failed to allocate expander data structure\n")); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } /* If the attached device is an end device */ else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: Found end device\n")); /* LP2006-05-26 added upstream device to the newly found device */ AttachedDevice->tdExpander = oneExpander; oneExpander->tdUpStreamExpander = agNULL; } } else { TI_DBG1(("tdsaSASUpStreamDiscoverExpanderPhy, Failed to add a device\n")); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } } } } /* substractive routing */ } } 
oneExpander->discoveringPhyId ++; if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: DISCOVERY_UP_STREAM find more ...\n")); /* continue discovery for the next phy */ tdsaDiscoverSend(tiRoot, oneDeviceData); } else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: DISCOVERY_UP_STREAM last phy continue upstream..\n")); /* remove the expander from the discovering list */ tdssSASDiscoveringExpanderRemove(tiRoot, onePortContext, oneExpander); /* continue upstream discovering */ tdsaSASUpStreamDiscovering(tiRoot, onePortContext, oneDeviceData); } } else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: onePortContext->discovery.status not in DISCOVERY_UP_STREAM; status %d\n", onePortContext->discovery.status)); } TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhy: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } // for debugging only osGLOBAL tdsaExpander_t * tdsaFindUpStreamConfigurableExp(tiRoot_t *tiRoot, tdsaExpander_t *oneExpander) { tdsaExpander_t *ret=agNULL; tdsaExpander_t *UpsreamExpander = oneExpander->tdUpStreamExpander; TI_DBG3(("tdsaFindUpStreamConfigurableExp: start\n")); TI_DBG3(("tdsaFindUpStreamConfigurableExp: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindUpStreamConfigurableExp: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); if (UpsreamExpander) { TI_DBG3(("tdsaFindUpStreamConfigurableExp: NO upsream expander\n")); } else { while (UpsreamExpander) { TI_DBG3(("tdsaFindUpStreamConfigurableExp: exp addrHi 0x%08x\n", UpsreamExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindUpStreamConfigurableExp: exp addrLo 0x%08x\n", UpsreamExpander->tdDevice->SASAddressID.sasAddressLo)); UpsreamExpander = UpsreamExpander->tdUpStreamExpander; } } return ret; } /***************************************************************************** *! 
\brief tdsaSASUpStreamDiscoverExpanderPhySkip * * Purpose: This function skips a phy which returned PHY_VACANT in SMP * response in upstream * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneExpander: Pointer to the expander data. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSASUpStreamDiscoverExpanderPhySkip( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaExpander_t *oneExpander ) { tdsaDeviceData_t *oneDeviceData; TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: start\n")); oneDeviceData = oneExpander->tdDevice; TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); oneExpander->discoveringPhyId ++; if (onePortContext->discovery.status == DISCOVERY_UP_STREAM) { if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: More Phys to discover\n")); /* continue discovery for the next phy */ tdsaDiscoverSend(tiRoot, oneDeviceData); } else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: No More Phys\n")); /* remove the expander from the discovering list */ tdssSASDiscoveringExpanderRemove(tiRoot, onePortContext, oneExpander); /* continue upstream discovering */ tdsaSASUpStreamDiscovering(tiRoot, onePortContext, oneDeviceData); } } else { TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: onePortContext->discovery.status not in DISCOVERY_UP_STREAM; status %d\n", onePortContext->discovery.status)); } TI_DBG3(("tdsaSASUpStreamDiscoverExpanderPhySkip: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } // for debugging only osGLOBAL tdsaExpander_t * tdsaFindDownStreamConfigurableExp(tiRoot_t *tiRoot, 
tdsaExpander_t *oneExpander) { tdsaExpander_t *ret=agNULL; tdsaExpander_t *DownsreamExpander = oneExpander->tdCurrentDownStreamExpander; TI_DBG3(("tdsaFindDownStreamConfigurableExp: start\n")); TI_DBG3(("tdsaFindDownStreamConfigurableExp: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindDownStreamConfigurableExp: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); if (DownsreamExpander) { TI_DBG3(("tdsaFindDownStreamConfigurableExp: NO downsream expander\n")); } else { while (DownsreamExpander) { TI_DBG3(("tdsaFindDownStreamConfigurableExp: exp addrHi 0x%08x\n", DownsreamExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindDownStreamConfigurableExp: exp addrLo 0x%08x\n", DownsreamExpander->tdDevice->SASAddressID.sasAddressLo)); DownsreamExpander = DownsreamExpander->tdCurrentDownStreamExpander; } } return ret; } // for debugging only osGLOBAL void dumpRoutingAttributes( tiRoot_t *tiRoot, tdsaExpander_t *oneExpander, bit8 phyID ) { bit32 i; TI_DBG3(("dumpRoutingAttributes: start\n")); TI_DBG3(("dumpRoutingAttributes: phyID %d\n", phyID)); TI_DBG3(("dumpRoutingAttributes: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("dumpRoutingAttributes: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); for(i=0;i <= ((bit32)phyID + 1); i++) { TI_DBG3(("dumpRoutingAttributes: index %d routing attribute %d\n", i, oneExpander->routingAttribute[i])); } return; } /***************************************************************************** *! \brief tdsaDumpAllExp * * Purpose: This function prints out all expanders seen by discovery. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneExpander: Pointer to the expander data. 
*
*  \return:
*           None
*
*   \note: For debugging only
*
*****************************************************************************/
osGLOBAL void
tdsaDumpAllExp(
  tiRoot_t          *tiRoot,
  tdsaPortContext_t *onePortContext,
  tdsaExpander_t    *oneExpander
  )
{
/* The entire dump body is compiled out; this function is a no-op unless the
   "#if 0" below is enabled by hand for debugging. */
#if 0 /* for debugging only */
  tdList_t          *ExpanderList;
  tdsaExpander_t    *tempExpander;
  tdsaExpander_t    *UpsreamExpander;
  tdsaExpander_t    *DownsreamExpander;
  tdsaPortContext_t *tmpOnePortContext = onePortContext;

  TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: start\n"));
  TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: onePortcontext %p oneExpander %p\n", onePortContext, oneExpander));
  /* debugging */
  /* empty-list check is done under the discovery lock */
  tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK);
  if (TDLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList)))
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
    TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: empty discoveringExpanderList\n"));
    return;
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK);
  }
  /* walk the discovering-expander list and log each node plus its
     up/downstream neighbours (NOTE(review): list is traversed without the
     lock held -- original behavior preserved) */
  ExpanderList = tmpOnePortContext->discovery.discoveringExpanderList.flink;
  while (ExpanderList != &(tmpOnePortContext->discovery.discoveringExpanderList))
  {
    tempExpander = TDLIST_OBJECT_BASE(tdsaExpander_t, linkNode, ExpanderList);
    UpsreamExpander = tempExpander->tdUpStreamExpander;
    DownsreamExpander = tempExpander->tdCurrentDownStreamExpander;
    TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: expander id %d\n", tempExpander->id));
    TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: exp addrHi 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressHi));
    TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: exp addrLo 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressLo));
    if (UpsreamExpander)
    {
      TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: Up exp addrHi 0x%08x\n", UpsreamExpander->tdDevice->SASAddressID.sasAddressHi));
      TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: Up exp addrLo 0x%08x\n", UpsreamExpander->tdDevice->SASAddressID.sasAddressLo));
    }
    else
    {
      TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: No Upstream expander\n"));
    }
    if (DownsreamExpander)
    {
      TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: Down exp addrHi 0x%08x\n", DownsreamExpander->tdDevice->SASAddressID.sasAddressHi));
      TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: Down exp addrLo 0x%08x\n", DownsreamExpander->tdDevice->SASAddressID.sasAddressLo));
    }
    else
    {
      TI_DBG3(("tdssSASDiscoveringExpander tdsaDumpAllExp: No Downstream expander\n"));
    }
    ExpanderList = ExpanderList->flink;
  }
#endif
  return;
}

/*****************************************************************************
*!
\brief tdsaDumpAllUpExp
*
*  Purpose:  This function prints out all upstream expanders seen by discovery.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneExpander:     Pointer to the expander data.
*
*  \return:
*           None
*
*   \note: For debugging only
*
*****************************************************************************/
osGLOBAL void
tdsaDumpAllUpExp(
  tiRoot_t          *tiRoot,
  tdsaPortContext_t *onePortContext,
  tdsaExpander_t    *oneExpander
  )
{
  /* intentionally empty stub; kept for debugging call sites */
  return;
}

/*****************************************************************************
*!
\brief tdsaDumpAllFreeExp
*
*  Purpose:  This function prints out all free expanders.
*
*  \param   tiRoot:  Pointer to the OS Specific module allocated tiRoot_t
*                    instance.
*  \return:
*           None
*
*   \note: For debugging only
*
*****************************************************************************/
osGLOBAL void
tdsaDumpAllFreeExp(
  tiRoot_t *tiRoot
  )
{
  /* intentionally empty stub; kept for debugging call sites */
  return;
}

/*****************************************************************************
*!
\brief tdsaDuplicateConfigSASAddr
*
*  Purpose:  This function finds whether SAS address has added to the routing
*            table of expander or not.
*
*  \param   tiRoot:       Pointer to the OS Specific module allocated tiRoot_t
*                         instance.
*  \param   oneExpander:  Pointer to the expander data.
* \param configSASAddressHi: Upper 4 byte of SAS address. * \param configSASAddressLo: Lower 4 byte of SAS address. * * \return: * agTRUE No need to add configSASAddress. * agFALSE Need to add configSASAddress. * * \note: * *****************************************************************************/ osGLOBAL bit32 tdsaDuplicateConfigSASAddr( tiRoot_t *tiRoot, tdsaExpander_t *oneExpander, bit32 configSASAddressHi, bit32 configSASAddressLo ) { bit32 i; bit32 ret = agFALSE; TI_DBG3(("tdsaDuplicateConfigSASAddr: start\n")); if (oneExpander == agNULL) { TI_DBG3(("tdsaDuplicateConfigSASAddr: NULL expander\n")); return agTRUE; } if (oneExpander->tdDevice->SASAddressID.sasAddressHi == configSASAddressHi && oneExpander->tdDevice->SASAddressID.sasAddressLo == configSASAddressLo ) { TI_DBG3(("tdsaDuplicateConfigSASAddr: unnecessary\n")); return agTRUE; } TI_DBG3(("tdsaDuplicateConfigSASAddr: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaDuplicateConfigSASAddr: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaDuplicateConfigSASAddr: configsasAddressHi 0x%08x\n", configSASAddressHi)); TI_DBG3(("tdsaDuplicateConfigSASAddr: configsasAddressLo 0x%08x\n", configSASAddressLo)); TI_DBG3(("tdsaDuplicateConfigSASAddr: configSASAddrTableIndex %d\n", oneExpander->configSASAddrTableIndex)); for(i=0;iconfigSASAddrTableIndex;i++) { if (oneExpander->configSASAddressHiTable[i] == configSASAddressHi && oneExpander->configSASAddressLoTable[i] == configSASAddressLo ) { TI_DBG3(("tdsaDuplicateConfigSASAddr: FOUND!!!\n")); ret = agTRUE; break; } } /* new one; let's add it */ if (ret == agFALSE) { TI_DBG3(("tdsaDuplicateConfigSASAddr: adding configSAS Addr!!!\n")); TI_DBG3(("tdsaDuplicateConfigSASAddr: configSASAddrTableIndex %d\n", oneExpander->configSASAddrTableIndex)); oneExpander->configSASAddressHiTable[oneExpander->configSASAddrTableIndex] = configSASAddressHi; 
oneExpander->configSASAddressLoTable[oneExpander->configSASAddrTableIndex] = configSASAddressLo; oneExpander->configSASAddrTableIndex++; } return ret; } /***************************************************************************** *! \brief tdsaFindConfigurableExp * * Purpose: This function finds whether there is a configurable expander in * the upstream expander list. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneExpander: Pointer to the expander data. * * \return: * agTRUE There is configurable expander. * agFALSE There is not configurable expander. * * \note: * *****************************************************************************/ osGLOBAL tdsaExpander_t * tdsaFindConfigurableExp( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaExpander_t *oneExpander ) { tdsaExpander_t *tempExpander; tdsaPortContext_t *tmpOnePortContext = onePortContext; tdsaExpander_t *ret = agNULL; TI_DBG3(("tdsaFindConfigurableExp: start\n")); if (oneExpander == agNULL) { TI_DBG3(("tdsaFindConfigurableExp: NULL expander\n")); return agNULL; } TI_DBG3(("tdsaFindConfigurableExp: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindConfigurableExp: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); tdsaSingleThreadedEnter(tiRoot, TD_DISC_LOCK); if (TDLIST_EMPTY(&(tmpOnePortContext->discovery.discoveringExpanderList))) { tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK); TI_DBG3(("tdsaFindConfigurableExp: empty UpdiscoveringExpanderList\n")); return agNULL; } else { tdsaSingleThreadedLeave(tiRoot, TD_DISC_LOCK); } tempExpander = oneExpander->tdUpStreamExpander; while (tempExpander) { TI_DBG3(("tdsaFindConfigurableExp: loop exp addrHi 0x%08x\n", tempExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindConfigurableExp: loop exp addrLo 0x%08x\n", 
tempExpander->tdDevice->SASAddressID.sasAddressLo)); if (tempExpander->configRouteTable) { TI_DBG3(("tdsaFindConfigurableExp: found configurable expander\n")); ret = tempExpander; break; } tempExpander = tempExpander->tdUpStreamExpander; } return ret; } /***************************************************************************** *! \brief tdsaSASDownStreamDiscoverExpanderPhy * * Purpose: This function actully does downstream traverse and finds out detailed * information about topology. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneExpander: Pointer to the expander data. * \param pDiscoverResp: Pointer to the Discovery SMP respsonse. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSASDownStreamDiscoverExpanderPhy( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaExpander_t *oneExpander, smpRespDiscover_t *pDiscoverResp ) { tdsaDeviceData_t *oneDeviceData; tdsaExpander_t *UpStreamExpander; tdsaDeviceData_t *AttachedDevice = agNULL; tdsaExpander_t *AttachedExpander; agsaSASIdentify_t sasIdentify; bit8 connectionRate; bit32 attachedSasHi, attachedSasLo; tdsaSASSubID_t agSASSubID; tdsaExpander_t *ConfigurableExpander = agNULL; bit32 dupConfigSASAddr = agFALSE; bit32 configSASAddressHi; bit32 configSASAddressLo; TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: start\n")); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); TD_ASSERT(tiRoot, "(tdsaSASDownStreamDiscoverExpanderPhy) agRoot NULL"); TD_ASSERT(onePortContext, "(tdsaSASDownStreamDiscoverExpanderPhy) pPort NULL"); TD_ASSERT(oneExpander, "(tdsaSASDownStreamDiscoverExpanderPhy) pExpander NULL"); TD_ASSERT(pDiscoverResp, 
"(tdsaSASDownStreamDiscoverExpanderPhy) pDiscoverResp NULL"); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: onePortContxt=%p oneExpander=%p oneDeviceData=%p\n", onePortContext, oneExpander, oneExpander->tdDevice)); if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return; } #ifdef TD_INTERNAL_DEBUG tdsaDumpAllExp(tiRoot, onePortContext, oneExpander); tdsaFindUpStreamConfigurableExp(tiRoot, oneExpander); tdsaFindDownStreamConfigurableExp(tiRoot, oneExpander); #endif /* (1) Find the device structure of the expander */ oneDeviceData = oneExpander->tdDevice; TD_ASSERT(oneDeviceData, "(tdsaSASDownStreamDiscoverExpanderPhy) pDevice NULL"); /* for debugging */ TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Phy #%d of SAS %08x-%08x\n", oneExpander->discoveringPhyId, oneDeviceData->SASAddressID.sasAddressHi, oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG3((" Attached device: %s\n", ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 0 ? "No Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 1 ? "End Device" : (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == 2 ? "Edge Expander" : "Fanout Expander"))))); /* for debugging */ if (oneExpander->discoveringPhyId != pDiscoverResp->phyIdentifier) { TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: !!! 
Incorrect SMP response !!!\n")); TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: Request PhyID #%d Response PhyID #%d\n", oneExpander->discoveringPhyId, pDiscoverResp->phyIdentifier)); tdhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover_t)); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); return; } #ifdef TD_INTERNAL_DEBUG /* debugging only */ if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_NO_DEVICE) { tdhexdump("NO_DEVICE", (bit8*)pDiscoverResp, sizeof(smpRespDiscover_t)); } #endif if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { TI_DBG3((" SAS address : %08x-%08x\n", DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp), DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp))); TI_DBG3((" SSP Target : %d\n", DISCRSP_IS_SSP_TARGET(pDiscoverResp)?1:0)); TI_DBG3((" STP Target : %d\n", DISCRSP_IS_STP_TARGET(pDiscoverResp)?1:0)); TI_DBG3((" SMP Target : %d\n", DISCRSP_IS_SMP_TARGET(pDiscoverResp)?1:0)); TI_DBG3((" SATA DEVICE : %d\n", DISCRSP_IS_SATA_DEVICE(pDiscoverResp)?1:0)); TI_DBG3((" SSP Initiator : %d\n", DISCRSP_IS_SSP_INITIATOR(pDiscoverResp)?1:0)); TI_DBG3((" STP Initiator : %d\n", DISCRSP_IS_STP_INITIATOR(pDiscoverResp)?1:0)); TI_DBG3((" SMP Initiator : %d\n", DISCRSP_IS_SMP_INITIATOR(pDiscoverResp)?1:0)); TI_DBG3((" Phy ID : %d\n", pDiscoverResp->phyIdentifier)); TI_DBG3((" Attached Phy ID: %d\n", pDiscoverResp->attachedPhyIdentifier)); } /* end for debugging */ /* saving routing attribute for non self-configuring expanders */ oneExpander->routingAttribute[pDiscoverResp->phyIdentifier] = DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp); /* for debugging */ // dumpRoutingAttributes(tiRoot, oneExpander, pDiscoverResp->phyIdentifier); oneExpander->discoverSMPAllowed = agTRUE; /* If a device is attached */ if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) != SAS_NO_DEVICE) { /* Setup sasIdentify for the attached device */ sasIdentify.phyIdentifier = pDiscoverResp->phyIdentifier; sasIdentify.deviceType_addressFrameType = 
(bit8)(pDiscoverResp->attachedDeviceType & 0x70); sasIdentify.initiator_ssp_stp_smp = pDiscoverResp->attached_Ssp_Stp_Smp_Sata_Initiator; sasIdentify.target_ssp_stp_smp = pDiscoverResp->attached_SataPS_Ssp_Stp_Smp_Sata_Target; *(bit32*)sasIdentify.sasAddressHi = *(bit32*)pDiscoverResp->attachedSasAddressHi; *(bit32*)sasIdentify.sasAddressLo = *(bit32*)pDiscoverResp->attachedSasAddressLo; /* incremental discovery */ agSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); agSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); agSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; agSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; attachedSasHi = DISCRSP_GET_ATTACHED_SAS_ADDRESSHI(pDiscoverResp); attachedSasLo = DISCRSP_GET_ATTACHED_SAS_ADDRESSLO(pDiscoverResp); /* If it's a direct routing */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_DIRECT) { /* If the attached device is an expander */ if ( (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) || (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) ) { TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error direct routing can't connect to expander\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); return; } } /* If the expander's attached device is not myself */ if ( (attachedSasHi != onePortContext->sasLocalAddressHi) || (attachedSasLo != onePortContext->sasLocalAddressLo) ) { /* Find the attached device from discovered list */ AttachedDevice = tdsaPortSASDeviceFind(tiRoot, onePortContext, attachedSasLo, attachedSasHi); /* If the device has 
not been discovered before */ if ( AttachedDevice == agNULL) //11 { /* If the phy has subtractive routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE && (DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE || DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) ) { /* TODO: discovery error, callback */ TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error subtractive routing error - inconsistent SAS address\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } else { /* Add the device */ /* read minimum rate from the configuration onePortContext->LinkRate is SPC's local link rate */ connectionRate = (bit8)(MIN(onePortContext->LinkRate, DISCRSP_GET_LINKRATE(pDiscoverResp))); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: link rate 0x%x\n", DEVINFO_GET_LINKRATE(&oneDeviceData->agDeviceInfo))); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: negotiatedPhyLinkRate 0x%x\n", DISCRSP_GET_LINKRATE(pDiscoverResp))); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: connectionRate 0x%x\n", connectionRate)); if (DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, STP_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = tdsaFindRegNValid( onePortContext->agRoot, onePortContext, &agSASSubID ); /* not registered and not valid; add 
this*/ if (AttachedDevice == agNULL) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, STP_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } } } else { if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } else { /* incremental discovery */ AttachedDevice = tdsaFindRegNValid( onePortContext->agRoot, onePortContext, &agSASSubID ); /* not registered and not valid; add this*/ if (AttachedDevice == agNULL) { AttachedDevice = tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } } } TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: newDevice pDevice=%p\n", AttachedDevice)); /* If the device is added successfully */ if ( AttachedDevice != agNULL) { if ( SA_IDFRM_IS_SSP_TARGET(&sasIdentify) || SA_IDFRM_IS_SMP_TARGET(&sasIdentify) || SA_IDFRM_IS_SSP_INITIATOR(&sasIdentify) || SA_IDFRM_IS_SMP_INITIATOR(&sasIdentify) ) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Report a new SAS device !!\n")); } else { if ( SA_IDFRM_IS_STP_TARGET(&sasIdentify) || SA_IDFRM_IS_SATA_DEVICE(&sasIdentify) ) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Found an STP or SATA device.\n")); } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Found Other type of device.\n")); } } /* LP2006-05-26 added upstream device to the newly found device */ AttachedDevice->tdExpander = oneExpander; /* If the phy has table routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) { /* If the attached device is a fan out expander */ if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) { /* TODO: discovery error, callback */ 
TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error two table routing phys are connected\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } else if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) { /* Allocate an expander data structure */ AttachedExpander = tdssSASDiscoveringExpanderAlloc(tiRoot, onePortContext, AttachedDevice); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Found a EDGE exp device.%p\n", AttachedExpander)); /* If allocate successfully */ if ( AttachedExpander != agNULL) { /* set up downstream information on configurable expander */ if (oneExpander->configRouteTable) { tdsaSASExpanderDownStreamPhyAdd(tiRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); } /* Setup upstream information */ tdsaSASExpanderUpStreamPhyAdd(tiRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); AttachedExpander->hasUpStreamDevice = agTRUE; AttachedExpander->upStreamSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); AttachedExpander->upStreamSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); AttachedExpander->tdUpStreamExpander = oneExpander; /* (2.3.2.2.2.2.2.2.2) Add the pAttachedExpander to discovering list */ tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, AttachedExpander); } /* If failed to allocate */ else { TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy, Failed to allocate expander data structure\n")); /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } } /* If status is still DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == 
DISCOVERY_DOWN_STREAM) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 1st before\n")); tdsaDumpAllUpExp(tiRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->tdUpStreamExpander; ConfigurableExpander = tdsaFindConfigurableExp(tiRoot, onePortContext, oneExpander); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); if (ConfigurableExpander) { if ( (ConfigurableExpander->tdDevice->SASAddressID.sasAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo)) && (ConfigurableExpander->tdDevice->SASAddressID.sasAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo)) ) { /* directly attached between oneExpander and ConfigurableExpander */ TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 1st before loc 1\n")); configSASAddressHi = oneExpander->tdDevice->SASAddressID.sasAddressHi; configSASAddressLo = oneExpander->tdDevice->SASAddressID.sasAddressLo; } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 1st before loc 2\n")); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); } } /* if !ConfigurableExpander */ dupConfigSASAddr = tdsaDuplicateConfigSASAddr(tiRoot, ConfigurableExpander, configSASAddressHi, configSASAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 1st q123\n")); UpStreamExpander->tdCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = tdsaFindCurrentDownStreamPhyIndex(tiRoot, ConfigurableExpander); ConfigurableExpander->tdReturnginExpander = oneExpander; tdsaSASRoutingEntryAdd(tiRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], configSASAddressHi, configSASAddressLo ); } } } /* If fail to add the device */ else { 
TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy, Failed to add a device\n")); /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } } /* If the device has been discovered before */ else /* haha discovered before */ { /* If the phy has subtractive routing attribute */ if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_SUBTRACTIVE) { /* If the expander doesn't have up stream device */ if ( oneExpander->hasUpStreamDevice == agFALSE) { /* TODO: discovery error, callback */ TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error loop, or end device connects to two expanders\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } /* If the expander has up stream device */ else { /* If sas address doesn't match */ if ( (oneExpander->upStreamSASAddressHi != attachedSasHi) || (oneExpander->upStreamSASAddressLo != attachedSasLo) ) { /* TODO: discovery error, callback */ TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error two subtractive phys\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } } /* If the phy has table routing attribute */ else if ( DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) { /* If the attached device is a fan out expander */ if ( 
DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_FANOUT_EXPANDER_DEVICE) { /* (2.3.3.2.1.1) TODO: discovery error, callback */ TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error fan out expander to routing table phy\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } /* If the attached device is an edge expander */ else if ( DISCRSP_GET_ATTACHED_DEVTYPE(pDiscoverResp) == SAS_EDGE_EXPANDER_DEVICE) { /* Setup up stream inform */ AttachedExpander = AttachedDevice->tdExpander; TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Found edge expander=%p\n", AttachedExpander)); //hhhhhh /* If the attached expander has up stream device */ if ( AttachedExpander->hasUpStreamDevice == agTRUE) { /* compare the sas address */ if ( (AttachedExpander->upStreamSASAddressHi != DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo)) || (AttachedExpander->upStreamSASAddressLo != DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo))) { /* TODO: discovery error, callback */ TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error two table routing phys connected (1)\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Add edge expander=%p\n", AttachedExpander)); 
/* set up downstream information on configurable expander */ if (oneExpander->configRouteTable) { tdsaSASExpanderDownStreamPhyAdd(tiRoot, oneExpander, (bit8) oneExpander->discoveringPhyId); } /* haha */ tdsaSASExpanderUpStreamPhyAdd(tiRoot, AttachedExpander, (bit8) oneExpander->discoveringPhyId); /* Add the pAttachedExpander to discovering list */ tdssSASDiscoveringExpanderAdd(tiRoot, onePortContext, AttachedExpander); } } /* If the attached expander doesn't have up stream device */ else { /* TODO: discovery error, callback */ TI_DBG1(("tdsaSASDownStreamDiscoverExpanderPhy: **** Topology Error two table routing phys connected (2)\n")); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.sasAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&oneDeviceData->agDeviceInfo); onePortContext->discovery.sasAddressIDDiscoverError.phyIdentifier = oneExpander->discoveringPhyId; /* discovery done */ tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } } } /* for else if (DISCRSP_GET_ROUTINGATTRIB(pDiscoverResp) == SAS_ROUTING_TABLE) */ /* do this regradless of sub or table */ /* If status is still DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 2nd before\n")); tdsaDumpAllUpExp(tiRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->tdUpStreamExpander; ConfigurableExpander = tdsaFindConfigurableExp(tiRoot, onePortContext, oneExpander); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); if (ConfigurableExpander) { if ( (ConfigurableExpander->tdDevice->SASAddressID.sasAddressHi == DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo)) && (ConfigurableExpander->tdDevice->SASAddressID.sasAddressLo == DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo)) 
) { /* directly attached between oneExpander and ConfigurableExpander */ TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 2nd before loc 1\n")); configSASAddressHi = oneExpander->tdDevice->SASAddressID.sasAddressHi; configSASAddressLo = oneExpander->tdDevice->SASAddressID.sasAddressLo; } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 2nd before loc 2\n")); configSASAddressHi = DEVINFO_GET_SAS_ADDRESSHI(&AttachedDevice->agDeviceInfo); configSASAddressLo = DEVINFO_GET_SAS_ADDRESSLO(&AttachedDevice->agDeviceInfo); } } /* if !ConfigurableExpander */ dupConfigSASAddr = tdsaDuplicateConfigSASAddr(tiRoot, ConfigurableExpander, configSASAddressHi, configSASAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 2nd q123 \n")); UpStreamExpander->tdCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = tdsaFindCurrentDownStreamPhyIndex(tiRoot, ConfigurableExpander); ConfigurableExpander->tdReturnginExpander = oneExpander; tdsaSASRoutingEntryAdd(tiRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], configSASAddressHi, configSASAddressLo ); } } /* if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM) */ /* incremental discovery */ if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_INCREMENTAL_START) { connectionRate = (bit8)(MIN(onePortContext->LinkRate, DISCRSP_GET_LINKRATE(pDiscoverResp))); if (DISCRSP_IS_STP_TARGET(pDiscoverResp) || DISCRSP_IS_SATA_DEVICE(pDiscoverResp)) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: incremental SATA_STP\n")); tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, IT_NEXUS_TIMEOUT, 0, STP_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: incremental SAS\n")); tdsaPortSASDeviceAdd( tiRoot, onePortContext, sasIdentify, agFALSE, connectionRate, 
IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, oneDeviceData, pDiscoverResp->phyIdentifier ); } } }/* else; existing devce */ } /* not attached to myself */ /* If the attached device is myself */ else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: Found Self\n")); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 3rd before\n")); tdsaDumpAllUpExp(tiRoot, onePortContext, oneExpander); UpStreamExpander = oneExpander->tdUpStreamExpander; ConfigurableExpander = tdsaFindConfigurableExp(tiRoot, onePortContext, oneExpander); dupConfigSASAddr = tdsaDuplicateConfigSASAddr(tiRoot, ConfigurableExpander, onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo ); if ( ConfigurableExpander && dupConfigSASAddr == agFALSE) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: 3rd q123 Setup routing table\n")); UpStreamExpander->tdCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = tdsaFindCurrentDownStreamPhyIndex(tiRoot, ConfigurableExpander); ConfigurableExpander->tdReturnginExpander = oneExpander; tdsaSASRoutingEntryAdd(tiRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], onePortContext->sasLocalAddressHi, onePortContext->sasLocalAddressLo ); } } } /* If no device is attached */ else { } /* Increment the discovering phy id */ oneExpander->discoveringPhyId ++; /* If the discovery status is DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM ) { /* If not the last phy */ if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: More Phys to discover\n")); /* continue discovery for the next phy */ tdsaDiscoverSend(tiRoot, oneDeviceData); } /* If the last phy */ else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: No More Phys\n")); /* remove the expander from the discovering list */ tdssSASDiscoveringExpanderRemove(tiRoot, onePortContext, oneExpander); /* continue downstream discovering 
*/ tdsaSASDownStreamDiscovering(tiRoot, onePortContext, oneDeviceData); } } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: onePortContext->discovery.status not in DISCOVERY_DOWN_STREAM; status %d\n", onePortContext->discovery.status)); } TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhy: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } /***************************************************************************** *! \brief tdsaSASDownStreamDiscoverExpanderPhySkip * * Purpose: This function skips a phy which returned PHY_VACANT in SMP * response in downstream * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneExpander: Pointer to the expander data. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSASDownStreamDiscoverExpanderPhySkip( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaExpander_t *oneExpander ) { tdsaDeviceData_t *oneDeviceData; TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: start\n")); oneDeviceData = oneExpander->tdDevice; TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); /* Increment the discovering phy id */ oneExpander->discoveringPhyId ++; /* If the discovery status is DISCOVERY_DOWN_STREAM */ if ( onePortContext->discovery.status == DISCOVERY_DOWN_STREAM ) { /* If not the last phy */ if ( oneExpander->discoveringPhyId < oneDeviceData->numOfPhys ) { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: More Phys to discover\n")); /* continue discovery for the next phy */ tdsaDiscoverSend(tiRoot, oneDeviceData); } /* If the last phy */ else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: No More Phys\n")); /* remove the expander 
from the discovering list */ tdssSASDiscoveringExpanderRemove(tiRoot, onePortContext, oneExpander); /* continue downstream discovering */ tdsaSASDownStreamDiscovering(tiRoot, onePortContext, oneDeviceData); } } else { TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: onePortContext->discovery.status not in DISCOVERY_DOWN_STREAM; status %d\n", onePortContext->discovery.status)); } TI_DBG3(("tdsaSASDownStreamDiscoverExpanderPhySkip: end return phyID#%d\n", oneExpander->discoveringPhyId - 1)); return; } /***************************************************************************** *! \brief tdsaSASRoutingEntryAdd * * Purpose: This function adds a routing entry in the configurable expander. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param oneExpander: Pointer to the expander data. * \param phyId: Phy identifier. * \param configSASAddressHi: Upper 4 byte of SAS address. * \param configSASAddressLo: Lower 4 byte of SAS address. * * \return: * agTRUE Routing entry is added successfully * agFALSE Routing entry is not added successfully * * \note: * *****************************************************************************/ osGLOBAL bit32 tdsaSASRoutingEntryAdd( tiRoot_t *tiRoot, tdsaExpander_t *oneExpander, bit32 phyId, bit32 configSASAddressHi, bit32 configSASAddressLo ) { bit32 ret = agTRUE; smpReqConfigureRouteInformation_t confRoutingInfo; tdsaPortContext_t *onePortContext; bit32 i; agsaRoot_t *agRoot; TI_DBG3(("tdsaSASRoutingEntryAdd: start\n")); TI_DBG3(("tdsaSASRoutingEntryAdd: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASRoutingEntryAdd: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaSASRoutingEntryAdd: phyid %d\n", phyId)); /* needs to compare the location of oneExpander and configSASAddress add only if oneExpander | configSASaddress */ if (oneExpander->tdDevice->SASAddressID.sasAddressHi == configSASAddressHi && 
oneExpander->tdDevice->SASAddressID.sasAddressLo == configSASAddressLo ) { TI_DBG3(("tdsaSASRoutingEntryAdd: unnecessary\n")); return ret; } if (oneExpander->routingAttribute[phyId] != SAS_ROUTING_TABLE) { TI_DBG3(("tdsaSASRoutingEntryAdd: not table routing, routing is %d\n", oneExpander->routingAttribute[phyId])); return ret; } agRoot = oneExpander->tdDevice->agRoot; onePortContext = oneExpander->tdDevice->tdPortContext; onePortContext->discovery.status = DISCOVERY_CONFIG_ROUTING; /* reset smpReqConfigureRouteInformation_t */ osti_memset(&confRoutingInfo, 0, sizeof(smpReqConfigureRouteInformation_t)); if ( oneExpander->currentIndex[phyId] < oneExpander->routingIndex ) { TI_DBG3(("tdsaSASRoutingEntryAdd: adding sasAddressHi 0x%08x\n", configSASAddressHi)); TI_DBG3(("tdsaSASRoutingEntryAdd: adding sasAddressLo 0x%08x\n", configSASAddressLo)); TI_DBG3(("tdsaSASRoutingEntryAdd: phyid %d currentIndex[phyid] %d\n", phyId, oneExpander->currentIndex[phyId])); oneExpander->configSASAddressHi = configSASAddressHi; oneExpander->configSASAddressLo = configSASAddressLo; confRoutingInfo.reserved1[0] = 0; confRoutingInfo.reserved1[1] = 0; OSSA_WRITE_BE_16(agRoot, confRoutingInfo.expanderRouteIndex, 0, (oneExpander->currentIndex[phyId])); confRoutingInfo.reserved2 = 0; confRoutingInfo.phyIdentifier = (bit8)phyId; confRoutingInfo.reserved3[0] = 0; confRoutingInfo.reserved3[1] = 0; confRoutingInfo.disabledBit_reserved4 = 0; confRoutingInfo.reserved5[0] = 0; confRoutingInfo.reserved5[1] = 0; confRoutingInfo.reserved5[2] = 0; OSSA_WRITE_BE_32(agRoot, confRoutingInfo.routedSasAddressHi, 0, configSASAddressHi); OSSA_WRITE_BE_32(agRoot, confRoutingInfo.routedSasAddressLo, 0, configSASAddressLo); for ( i = 0; i < 16; i ++ ) { confRoutingInfo.reserved6[i] = 0; } tdSMPStart(tiRoot, agRoot, oneExpander->tdDevice, SMP_CONFIGURE_ROUTING_INFORMATION, (bit8 *)&confRoutingInfo, sizeof(smpReqConfigureRouteInformation_t), AGSA_SMP_INIT_REQ, agNULL, 0); oneExpander->currentIndex[phyId] ++; } else { 
TI_DBG1(("tdsaSASRoutingEntryAdd: Discovery Error routing index overflow for currentIndex=%d, routingIndex=%d\n", oneExpander->currentIndex[phyId], oneExpander->routingIndex)); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); ret = agFALSE; } return ret; } /***************************************************************************** *! \brief tdsaConfigRoutingInfoRespRcvd * * Purpose: This function processes Configure Routing Information response. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param agRoot: Pointer to chip/driver Instance. * \param oneDeviceData: Pointer to the device data. * \param frameHeader: Pointer to SMP frame header. * \param frameHandle: A Handle used to refer to the response frame * * \return: * None * * \note: * *****************************************************************************/ /* needs to traverse only upstream not downstream */ osGLOBAL void tdsaConfigRoutingInfoRespRcvd( tiRoot_t *tiRoot, agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, tdsaDeviceData_t *oneDeviceData, tdssSMPFrameHeader_t *frameHeader, agsaFrameHandle_t frameHandle ) { tdsaExpander_t *oneExpander = oneDeviceData->tdExpander; tdsaExpander_t *UpStreamExpander; tdsaExpander_t *DownStreamExpander; tdsaExpander_t *ReturningExpander; tdsaExpander_t *ConfigurableExpander; tdsaPortContext_t *onePortContext; tdsaDeviceData_t *ReturningExpanderDeviceData; bit32 dupConfigSASAddr = agFALSE; TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: start\n")); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); onePortContext = oneDeviceData->tdPortContext; if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaConfigRoutingInfoRespRcvd: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return; } if ( frameHeader->smpFunctionResult == 
SMP_FUNCTION_ACCEPTED || frameHeader->smpFunctionResult == PHY_VACANT ) { DownStreamExpander = oneExpander->tdCurrentDownStreamExpander; if (DownStreamExpander != agNULL) { DownStreamExpander->currentUpStreamPhyIndex ++; TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: DownStreamExpander->currentUpStreamPhyIndex %d\n", DownStreamExpander->currentUpStreamPhyIndex)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: DownStreamExpander->numOfUpStreamPhys %d\n", DownStreamExpander->numOfUpStreamPhys)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: DownStreamExpander addrHi 0x%08x\n", DownStreamExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: DownStreamExpander addrLo 0x%08x\n", DownStreamExpander->tdDevice->SASAddressID.sasAddressLo)); } oneExpander->currentDownStreamPhyIndex++; TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: oneExpander->currentDownStreamPhyIndex %d oneExpander->numOfDownStreamPhys %d\n", oneExpander->currentDownStreamPhyIndex, oneExpander->numOfDownStreamPhys)); if ( DownStreamExpander != agNULL) { if (DownStreamExpander->currentUpStreamPhyIndex < DownStreamExpander->numOfUpStreamPhys) { TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: first if\n")); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: DownStreamExpander->currentUpStreamPhyIndex %d\n", DownStreamExpander->currentUpStreamPhyIndex)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: DownStreamExpander->upStreamPhys[] %d\n", DownStreamExpander->upStreamPhys[DownStreamExpander->currentUpStreamPhyIndex])); tdsaSASRoutingEntryAdd(tiRoot, oneExpander, DownStreamExpander->upStreamPhys[DownStreamExpander->currentUpStreamPhyIndex], oneExpander->configSASAddressHi, oneExpander->configSASAddressLo ); } else { /* traversing up till discovery Root onePortContext->discovery.RootExp */ TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: else\n")); UpStreamExpander = oneExpander->tdUpStreamExpander; ConfigurableExpander = tdsaFindConfigurableExp(tiRoot, onePortContext, oneExpander); if (UpStreamExpander != agNULL) { 
TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: UpStreamExpander addrHi 0x%08x\n", UpStreamExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: UpStreamExpander addrLo 0x%08x\n", UpStreamExpander->tdDevice->SASAddressID.sasAddressLo)); dupConfigSASAddr = tdsaDuplicateConfigSASAddr(tiRoot, ConfigurableExpander, oneExpander->configSASAddressHi, oneExpander->configSASAddressLo ); if ( ConfigurableExpander != agNULL && dupConfigSASAddr == agFALSE) { TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: else if\n")); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ConfigurableExpander addrHi 0x%08x\n", ConfigurableExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ConfigurableExpander addrLo 0x%08x\n", ConfigurableExpander->tdDevice->SASAddressID.sasAddressLo)); UpStreamExpander->tdCurrentDownStreamExpander = oneExpander; ConfigurableExpander->currentDownStreamPhyIndex = tdsaFindCurrentDownStreamPhyIndex(tiRoot, ConfigurableExpander); ConfigurableExpander->tdReturnginExpander = oneExpander->tdReturnginExpander; DownStreamExpander->currentUpStreamPhyIndex = 0; TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ConfigurableExpander->currentDownStreamPhyIndex %d\n", ConfigurableExpander->currentDownStreamPhyIndex)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ConfigurableExpander->downStreamPhys[] %d\n", ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex])); tdsaSASRoutingEntryAdd(tiRoot, ConfigurableExpander, ConfigurableExpander->downStreamPhys[ConfigurableExpander->currentDownStreamPhyIndex], oneExpander->configSASAddressHi, oneExpander->configSASAddressLo ); } else { /* going back to where it was */ /* ConfigRoutingInfo is done for a target */ TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: $$$$$$ my change $$$$$ \n")); ReturningExpander = oneExpander->tdReturnginExpander; DownStreamExpander->currentUpStreamPhyIndex = 0; /* debugging */ if (ReturningExpander != agNULL) { 
TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ReturningExpander addrHi 0x%08x\n", ReturningExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ReturningExpander addrLo 0x%08x\n", ReturningExpander->tdDevice->SASAddressID.sasAddressLo)); ReturningExpanderDeviceData = ReturningExpander->tdDevice; /* No longer in DISCOVERY_CONFIG_ROUTING */ onePortContext->discovery.status = DISCOVERY_DOWN_STREAM; /* If not the last phy */ if ( ReturningExpander->discoveringPhyId < ReturningExpanderDeviceData->numOfPhys ) { TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: More Phys to discover\n")); /* continue discovery for the next phy */ /* needs to send only one Discovery not multiple times */ if (ReturningExpander->discoverSMPAllowed == agTRUE) { tdsaDiscoverSend(tiRoot, ReturningExpanderDeviceData); } ReturningExpander->discoverSMPAllowed = agFALSE; } /* If the last phy */ else { TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: No More Phys\n")); ReturningExpander->discoverSMPAllowed = agTRUE; /* remove the expander from the discovering list */ tdssSASDiscoveringExpanderRemove(tiRoot, onePortContext, ReturningExpander); /* continue downstream discovering */ tdsaSASDownStreamDiscovering(tiRoot, onePortContext, ReturningExpanderDeviceData); //DownStreamExpander } } else { TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: ReturningExpander is NULL\n")); } } } else { TI_DBG3(("tdsaConfigRoutingInfoRespRcvd: UpStreamExpander is NULL\n")); } } } } else { TI_DBG1(("tdsaConfigRoutingInfoRespRcvd: Discovery Error SMP function return result error=%x\n", frameHeader->smpFunctionResult)); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } return; } /***************************************************************************** *! \brief tdsaReportPhySataSend * * Purpose: This function sends Report Phy SATA to a device. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param oneDeviceData: Pointer to the device data. 
* \param phyId: Phy Identifier. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaReportPhySataSend( tiRoot_t *tiRoot, tdsaDeviceData_t *oneDeviceData, bit8 phyId ) { agsaRoot_t *agRoot; tdsaExpander_t *oneExpander; tdsaPortContext_t *onePortContext; smpReqReportPhySata_t smpReportPhySataReq; TI_DBG3(("tdsaReportPhySataSend: start\n")); agRoot = oneDeviceData->agRoot; onePortContext = oneDeviceData->tdPortContext; oneExpander = oneDeviceData->tdExpander; if (onePortContext == agNULL) { TI_DBG1(("tdsaReportPhySataSend: Error!!! portcontext is NULL\n")); } if (oneExpander == agNULL) { TI_DBG1(("tdsaReportPhySataSend: Error!!! expander is NULL\n")); return; } TI_DBG3(("tdsaReportPhySataSend: device %p did %d\n", oneDeviceData, oneDeviceData->id)); TI_DBG3(("tdsaReportPhySataSend: phyid %d\n", phyId)); oneExpander->tdDeviceToProcess = oneDeviceData; osti_memset(&smpReportPhySataReq, 0, sizeof(smpReqReportPhySata_t)); smpReportPhySataReq.phyIdentifier = phyId; tdSMPStart( tiRoot, agRoot, oneExpander->tdDevice, SMP_REPORT_PHY_SATA, (bit8 *)&smpReportPhySataReq, sizeof(smpReqReportPhySata_t), AGSA_SMP_INIT_REQ, agNULL, 0 ); return; } /***************************************************************************** *! \brief tdsaReportPhySataRcvd * * Purpose: This function processes Report Phy SATA response. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param agRoot: Pointer to chip/driver Instance. * \param oneDeviceData: Pointer to the device data. * \param frameHeader: Pointer to SMP frame header. 
* \param frameHandle: A Handle used to refer to the response frame * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaReportPhySataRcvd( tiRoot_t *tiRoot, agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, tdsaDeviceData_t *oneDeviceData, tdssSMPFrameHeader_t *frameHeader, agsaFrameHandle_t frameHandle ) { smpRespReportPhySata_t SMPreportPhySataResp; smpRespReportPhySata_t *pSMPReportPhySataResp; tdsaExpander_t *oneExpander = oneDeviceData->tdExpander; tdsaPortContext_t *onePortContext; agsaFisRegDeviceToHost_t *fis; tdsaDeviceData_t *SataDevice; #ifndef DIRECT_SMP tdssSMPRequestBody_t *tdSMPRequestBody; #endif TI_DBG3(("tdsaReportPhySataRcvd: start\n")); TI_DBG3(("tdsaReportPhySataRcvd: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaReportPhySataRcvd: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); #ifndef DIRECT_SMP tdSMPRequestBody = (tdssSMPRequestBody_t *)agIORequest->osData; #endif /* get the current sata device hanlde stored in the expander structure */ SataDevice = oneExpander->tdDeviceToProcess; pSMPReportPhySataResp = &SMPreportPhySataResp; #ifdef DIRECT_SMP saFrameReadBlock(agRoot, frameHandle, 4, pSMPReportPhySataResp, sizeof(smpRespReportPhySata_t)); #else saFrameReadBlock(agRoot, tdSMPRequestBody->IndirectSMPResp, 4, pSMPReportPhySataResp, sizeof(smpRespReportPhySata_t)); #endif //tdhexdump("tdsaReportPhySataRcvd", (bit8 *)pSMPReportPhySataResp, sizeof(smpRespReportPhySata_t)); #ifndef DIRECT_SMP ostiFreeMemory( tiRoot, tdSMPRequestBody->IndirectSMPReqosMemHandle, tdSMPRequestBody->IndirectSMPReqLen ); ostiFreeMemory( tiRoot, tdSMPRequestBody->IndirectSMPResposMemHandle, tdSMPRequestBody->IndirectSMPRespLen ); #endif onePortContext = oneDeviceData->tdPortContext; if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaReportPhySataRcvd: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return; } if 
(SataDevice == agNULL) { TI_DBG1(("tdsaReportPhySataRcvd: SataDevice is NULL, wrong\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return; } if ( frameHeader->smpFunctionResult == SMP_FUNCTION_ACCEPTED || frameHeader->smpFunctionResult == PHY_VACANT ) { fis = (agsaFisRegDeviceToHost_t*) &SMPreportPhySataResp.regDevToHostFis; if (fis->h.fisType == REG_DEV_TO_HOST_FIS) { /* save signature */ TI_DBG3(("tdsaReportPhySataRcvd: saves the signature\n")); /* saves signature */ SataDevice->satDevData.satSignature[0] = fis->d.sectorCount; SataDevice->satDevData.satSignature[1] = fis->d.lbaLow; SataDevice->satDevData.satSignature[2] = fis->d.lbaMid; SataDevice->satDevData.satSignature[3] = fis->d.lbaHigh; SataDevice->satDevData.satSignature[4] = fis->d.device; SataDevice->satDevData.satSignature[5] = 0; SataDevice->satDevData.satSignature[6] = 0; SataDevice->satDevData.satSignature[7] = 0; TI_DBG3(("tdsaReportPhySataRcvd: SATA Signature = %02x %02x %02x %02x %02x\n", SataDevice->satDevData.satSignature[0], SataDevice->satDevData.satSignature[1], SataDevice->satDevData.satSignature[2], SataDevice->satDevData.satSignature[3], SataDevice->satDevData.satSignature[4])); /* no longer, discovery sends sata identify device command tdsaSATAIdentifyDeviceCmdSend(tiRoot, SataDevice); */ SataDevice = tdsaFindRightDevice(tiRoot, onePortContext, SataDevice); tdsaDiscoveringStpSATADevice(tiRoot, onePortContext, SataDevice); } else { TI_DBG3(("tdsaReportPhySataRcvd: getting next stp bride\n")); SataDevice = tdsaFindRightDevice(tiRoot, onePortContext, SataDevice); tdsaDiscoveringStpSATADevice(tiRoot, onePortContext, SataDevice); } } else { TI_DBG3(("tdsaReportPhySataRcvd: siReportPhySataRcvd SMP function return result %x\n", frameHeader->smpFunctionResult)); tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } return; } /***************************************************************************** *! 
\brief tdsaSASExpanderUpStreamPhyAdd * * Purpose: This function adds upstream expander to a specfic phy. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param oneExpander: Pointer to the expander data. * \param phyId: Phy Identifier. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSASExpanderUpStreamPhyAdd( tiRoot_t *tiRoot, tdsaExpander_t *oneExpander, bit8 phyId ) { bit32 i; bit32 hasSet = agFALSE; TI_DBG3(("tdsaSASExpanderUpStreamPhyAdd: start, phyid %d\n", phyId)); TI_DBG3(("tdsaSASExpanderUpStreamPhyAdd: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASExpanderUpStreamPhyAdd: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaSASExpanderUpStreamPhyAdd: phyid %d numOfUpStreamPhys %d\n", phyId, oneExpander->numOfUpStreamPhys)); for ( i = 0; i < oneExpander->numOfUpStreamPhys; i ++ ) { if ( oneExpander->upStreamPhys[i] == phyId ) { hasSet = agTRUE; break; } } if ( hasSet == agFALSE ) { oneExpander->upStreamPhys[oneExpander->numOfUpStreamPhys ++] = phyId; } TI_DBG3(("tdsaSASExpanderUpStreamPhyAdd: AFTER phyid %d numOfUpStreamPhys %d\n", phyId, oneExpander->numOfUpStreamPhys)); /* for debugging */ for ( i = 0; i < oneExpander->numOfUpStreamPhys; i ++ ) { TI_DBG3(("tdsaSASExpanderUpStreamPhyAdd: index %d upstream[index] %d\n", i, oneExpander->upStreamPhys[i])); } return; } /* just add phys in downstream in configurable expnader */ /***************************************************************************** *! \brief tdsaSASExpanderDownStreamPhyAdd * * Purpose: This function adds downstream expander to a specfic phy. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param oneExpander: Pointer to the expander data. * \param phyId: Phy Identifier. 
* * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSASExpanderDownStreamPhyAdd( tiRoot_t *tiRoot, tdsaExpander_t *oneExpander, bit8 phyId ) { bit32 i; bit32 hasSet = agFALSE; TI_DBG3(("tdsaSASExpanderDownStreamPhyAdd: start, phyid %d\n", phyId)); TI_DBG3(("tdsaSASExpanderDownStreamPhyAdd: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaSASExpanderDownStreamPhyAdd: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaSASExpanderDownStreamPhyAdd: phyid %d numOfDownStreamPhys %d\n", phyId, oneExpander->numOfDownStreamPhys)); for ( i = 0; i < oneExpander->numOfDownStreamPhys; i ++ ) { if ( oneExpander->downStreamPhys[i] == phyId ) { hasSet = agTRUE; break; } } if ( hasSet == agFALSE ) { oneExpander->downStreamPhys[oneExpander->numOfDownStreamPhys ++] = phyId; } TI_DBG3(("tdsaSASExpanderDownStreamPhyAdd: AFTER phyid %d numOfDownStreamPhys %d\n", phyId, oneExpander->numOfDownStreamPhys)); /* for debugging */ for ( i = 0; i < oneExpander->numOfDownStreamPhys; i ++ ) { TI_DBG3(("tdsaSASExpanderDownStreamPhyAdd: index %d downstream[index] %d\n", i, oneExpander->downStreamPhys[i])); } return; } /* oneExpander is the configurable expander of interest phyId is the first phyID in upStreamPhys[0] of downExpander */ /***************************************************************************** *! \brief tdsaFindCurrentDownStreamPhyIndex * * Purpose: This function finds CurrentDownStreamPhyIndex from a configurable * expander. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param oneExpander: Pointer to the configuralbe expander data. 
* * \return: * CurrentDownStreamPhyIndex * * *****************************************************************************/ osGLOBAL bit16 tdsaFindCurrentDownStreamPhyIndex( tiRoot_t *tiRoot, tdsaExpander_t *oneExpander ) { tdsaExpander_t *DownStreamExpander; bit16 index = 0; bit16 i; bit8 phyId = 0; TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: start\n")); if (oneExpander == agNULL) { TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: wrong!!! oneExpander is NULL\n")); return 0; } DownStreamExpander = oneExpander->tdCurrentDownStreamExpander; if (DownStreamExpander == agNULL) { TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: wrong!!! DownStreamExpander is NULL\n")); return 0; } TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: exp addrHi 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: exp addrLo 0x%08x\n", oneExpander->tdDevice->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: downstream exp addrHi 0x%08x\n", DownStreamExpander->tdDevice->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: downstream exp addrLo 0x%08x\n", DownStreamExpander->tdDevice->SASAddressID.sasAddressLo)); TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: numOfDownStreamPhys %d\n", oneExpander->numOfDownStreamPhys)); phyId = DownStreamExpander->upStreamPhys[0]; TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: phyId %d\n", phyId)); for (i=0; inumOfDownStreamPhys;i++) { if (oneExpander->downStreamPhys[i] == phyId) { index = i; break; } } TI_DBG3(("tdsaFindCurrentDownStreamPhyIndex: index %d\n", index)); return index; } /***************************************************************************** *! \brief tdsaPortSASDeviceFind * * Purpose: Given SAS address, this function finds a device with that SAS address * in the device list. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. 
* \param sasAddrLo: Lower 4 byte of SAS address. * \param sasAddrHi: Upper 4 byte of SAS address. * * \return: * agNULL When no device found * Pointer to device When device is found * * \note: * *****************************************************************************/ osGLOBAL tdsaDeviceData_t * tdsaPortSASDeviceFind( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, bit32 sasAddrLo, bit32 sasAddrHi ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdsaDeviceData_t *oneDeviceData, *RetDeviceData=agNULL; tdList_t *DeviceListList; TI_DBG3(("tdsaPortSASDeviceFind: start\n")); TD_ASSERT((agNULL != tiRoot), ""); TD_ASSERT((agNULL != onePortContext), ""); tdsaSingleThreadedEnter(tiRoot, TD_DEVICE_LOCK); /* find a device's existence */ DeviceListList = tdsaAllShared->MainDeviceList.flink; if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START) { TI_DBG3(("tdsaPortSASDeviceFind: Full discovery\n")); while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); if ((oneDeviceData->SASAddressID.sasAddressHi == sasAddrHi) && (oneDeviceData->SASAddressID.sasAddressLo == sasAddrLo) && (oneDeviceData->valid == agTRUE) && (oneDeviceData->tdPortContext == onePortContext) ) { TI_DBG3(("tdsaPortSASDeviceFind: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); TI_DBG3(("tdsaPortSASDeviceFind: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaPortSASDeviceFind: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); RetDeviceData = oneDeviceData; break; } DeviceListList = DeviceListList->flink; } } else { /* incremental discovery */ TI_DBG3(("tdsaPortSASDeviceFind: Incremental discovery\n")); while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); if 
((oneDeviceData->SASAddressID.sasAddressHi == sasAddrHi) && (oneDeviceData->SASAddressID.sasAddressLo == sasAddrLo) && (oneDeviceData->valid2 == agTRUE) && (oneDeviceData->tdPortContext == onePortContext) ) { TI_DBG3(("tdsaPortSASDeviceFind: Found pid %d did %d\n", onePortContext->id, oneDeviceData->id)); TI_DBG3(("tdsaPortSASDeviceFind: sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressHi)); TI_DBG3(("tdsaPortSASDeviceFind: sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); RetDeviceData = oneDeviceData; break; } DeviceListList = DeviceListList->flink; } } tdsaSingleThreadedLeave(tiRoot, TD_DEVICE_LOCK); return RetDeviceData; } /* include both sas and stp-sata targets*/ /***************************************************************************** *! \brief tdsaPortSASDeviceAdd * * Purpose: This function adds the SAS device to the device list. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param sasIdentify: SAS identify address frame. * \param sasInitiator: SAS initiator. * \param connectionRate: Connection Rate. * \param itNexusTimeout: IT NEXUS timeout value. * \param firstBurstSize: First Burst Size. * \param deviceType: Device Type. * * \return: * Pointer to device data. 
* * \note: * *****************************************************************************/ GLOBAL tdsaDeviceData_t * tdsaPortSASDeviceAdd( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, agsaSASIdentify_t sasIdentify, bit32 sasInitiator, bit8 connectionRate, bit32 itNexusTimeout, bit32 firstBurstSize, bit32 deviceType, tdsaDeviceData_t *oneExpDeviceData, bit8 phyID ) { tdsaDeviceData_t *oneDeviceData = agNULL; bit8 dev_s_rate = 0; bit8 sasorsata = 1; // bit8 devicetype; tdsaSASSubID_t agSASSubID; tdsaDeviceData_t *oneAttachedExpDeviceData = agNULL; TI_DBG3(("tdsaPortSASDeviceAdd: start\n")); TI_DBG3(("tdsaPortSASDeviceAdd: connectionRate %d\n", connectionRate)); agSASSubID.sasAddressHi = SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify); agSASSubID.sasAddressLo = SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify); agSASSubID.initiator_ssp_stp_smp = sasIdentify.initiator_ssp_stp_smp; agSASSubID.target_ssp_stp_smp = sasIdentify.target_ssp_stp_smp; /* old device and already registered to LL; added by link-up event */ if ( agFALSE == tdssNewSASorNot( onePortContext->agRoot, onePortContext, &agSASSubID ) ) { /* old device and already registered to LL; added by link-up event */ TI_DBG3(("tdsaPortSASDeviceAdd: OLD qqqq initiator_ssp_stp_smp %d target_ssp_stp_smp %d\n", agSASSubID.initiator_ssp_stp_smp, agSASSubID.target_ssp_stp_smp)); /* find the old device */ oneDeviceData = tdssNewAddSASToSharedcontext( onePortContext->agRoot, onePortContext, &agSASSubID, oneExpDeviceData, phyID ); if (oneDeviceData == agNULL) { TI_DBG1(("tdsaPortSASDeviceAdd: no more device!!! 
oneDeviceData is null\n")); } /* If a device is allocated */ if ( oneDeviceData != agNULL ) { TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify))); TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify))); oneDeviceData->sasIdentify = sasIdentify; TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify))); TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify))); /* parse sasIDframe to fill in agDeviceInfo */ DEVINFO_PUT_SMPTO(&oneDeviceData->agDeviceInfo, DEFAULT_SMP_TIMEOUT); DEVINFO_PUT_ITNEXUSTO(&oneDeviceData->agDeviceInfo, (bit16)itNexusTimeout); DEVINFO_PUT_FBS(&oneDeviceData->agDeviceInfo, (bit16)firstBurstSize); DEVINFO_PUT_FLAG(&oneDeviceData->agDeviceInfo, 1); oneDeviceData->SASSpecDeviceType = (bit8)(SA_IDFRM_GET_DEVICETTYPE(&sasIdentify)); /* adjusting connectionRate */ oneAttachedExpDeviceData = oneDeviceData->ExpDevice; if (oneAttachedExpDeviceData != agNULL) { connectionRate = (bit8)(MIN(connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); TI_DBG3(("tdsaPortSASDeviceAdd: 1st connectionRate 0x%x DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo) 0x%x\n", connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); } else { TI_DBG3(("tdsaPortSASDeviceAdd: 1st oneAttachedExpDeviceData is NULL\n")); } /* Device Type, SAS or SATA, connection rate; bit7 --- bit0 */ sasorsata = (bit8)deviceType; /* sTSDK spec device typ */ dev_s_rate = (bit8)(dev_s_rate | (sasorsata << 4)); dev_s_rate = (bit8)(dev_s_rate | connectionRate); DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->agDeviceInfo, dev_s_rate); DEVINFO_PUT_SAS_ADDRESSLO( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify) ); DEVINFO_PUT_SAS_ADDRESSHI( &oneDeviceData->agDeviceInfo, 
SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify) ); oneDeviceData->agContext.osData = oneDeviceData; oneDeviceData->agContext.sdkData = agNULL; } return oneDeviceData; } /* old device */ /* new device */ TI_DBG3(("tdsaPortSASDeviceAdd: NEW qqqq initiator_ssp_stp_smp %d target_ssp_stp_smp %d\n", agSASSubID.initiator_ssp_stp_smp, agSASSubID.target_ssp_stp_smp)); /* allocate a new device and set the valid bit */ oneDeviceData = tdssNewAddSASToSharedcontext( onePortContext->agRoot, onePortContext, &agSASSubID, oneExpDeviceData, phyID ); if (oneDeviceData == agNULL) { TI_DBG1(("tdsaPortSASDeviceAdd: no more device!!! oneDeviceData is null\n")); } /* If a device is allocated */ if ( oneDeviceData != agNULL ) { TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&sasIdentify))); TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&sasIdentify))); oneDeviceData->sasIdentify = sasIdentify; TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify))); TI_DBG3(("tdsaPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify))); /* parse sasIDframe to fill in agDeviceInfo */ DEVINFO_PUT_SMPTO(&oneDeviceData->agDeviceInfo, DEFAULT_SMP_TIMEOUT); DEVINFO_PUT_ITNEXUSTO(&oneDeviceData->agDeviceInfo, (bit16)itNexusTimeout); DEVINFO_PUT_FBS(&oneDeviceData->agDeviceInfo, (bit16)firstBurstSize); DEVINFO_PUT_FLAG(&oneDeviceData->agDeviceInfo, 1); oneDeviceData->SASSpecDeviceType = (bit8)(SA_IDFRM_GET_DEVICETTYPE(&sasIdentify)); /* adjusting connectionRate */ oneAttachedExpDeviceData = oneDeviceData->ExpDevice; if (oneAttachedExpDeviceData != agNULL) { connectionRate = (bit8)(MIN(connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); TI_DBG3(("tdsaPortSASDeviceAdd: 2nd connectionRate 0x%x DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo) 0x%x\n", connectionRate, 
DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); } else { TI_DBG3(("tdsaPortSASDeviceAdd: 2nd oneAttachedExpDeviceData is NULL\n")); } /* Device Type, SAS or SATA, connection rate; bit7 --- bit0 */ sasorsata = (bit8)deviceType; dev_s_rate = (bit8)(dev_s_rate | (sasorsata << 4)); dev_s_rate = (bit8)(dev_s_rate | connectionRate); DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->agDeviceInfo, dev_s_rate); DEVINFO_PUT_SAS_ADDRESSLO( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify) ); DEVINFO_PUT_SAS_ADDRESSHI( &oneDeviceData->agDeviceInfo, SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify) ); oneDeviceData->agContext.osData = oneDeviceData; oneDeviceData->agContext.sdkData = agNULL; TI_DBG3(("tdsaPortSASDeviceAdd: did %d\n", oneDeviceData->id)); /* don't add and register initiator for T2D */ if ( (((sasIdentify.initiator_ssp_stp_smp & DEVICE_SSP_BIT) == DEVICE_SSP_BIT) && ((sasIdentify.target_ssp_stp_smp & DEVICE_SSP_BIT) != DEVICE_SSP_BIT)) || (((sasIdentify.initiator_ssp_stp_smp & DEVICE_STP_BIT) == DEVICE_STP_BIT) && ((sasIdentify.target_ssp_stp_smp & DEVICE_SSP_BIT) != DEVICE_SSP_BIT)) ) { TI_DBG1(("tdsaPortSASDeviceAdd: initiator. no add and registration\n")); TI_DBG1(("tdsaPortSASDeviceAdd: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify))); TI_DBG1(("tdsaPortSASDeviceAdd: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify))); } else { if (oneDeviceData->registered == agFALSE) { TI_DBG2(("tdsaPortSASDeviceAdd: did %d\n", oneDeviceData->id)); saRegisterNewDevice( /* tdsaPortSASDeviceAdd */ onePortContext->agRoot, &oneDeviceData->agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), &oneDeviceData->agDeviceInfo, onePortContext->agPortContext, 0 ); } } } return oneDeviceData; } /***************************************************************************** *! 
\brief tdsaDiscoveryResetProcessed * * Purpose: This function called to reset "processed flag" of device belong to * a specified port. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaDiscoveryResetProcessed( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext ) { tdsaDeviceData_t *oneDeviceData = agNULL; tdList_t *DeviceListList; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; TI_DBG6(("tdsaDiscoveryResetProcessed: start\n")); /* reinitialize the device data belonging to this portcontext */ DeviceListList = tdsaAllShared->MainDeviceList.flink; while (DeviceListList != &(tdsaAllShared->MainDeviceList)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); TI_DBG6(("tdsaDiscoveryResetProcessed: loop did %d\n", oneDeviceData->id)); if (oneDeviceData->tdPortContext == onePortContext) { TI_DBG6(("tdsaDiscoveryResetProcessed: resetting procssed flag\n")); oneDeviceData->processed = agFALSE; } DeviceListList = DeviceListList->flink; } return; } /***************************************************************************** *! \brief tdsaSATADiscoverDone * * Purpose: This function called to finish up SATA discovery. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param flag: status of discovery (success or failure). 
* * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSATADiscoverDone( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, bit32 flag ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; TI_DBG3(("tdsaSATADiscoverDone: start\n")); tdsaDiscoveryResetProcessed(tiRoot, onePortContext); if (onePortContext->discovery.SeenBC == agTRUE) { TI_DBG3(("tdsaSATADiscoverDone: broadcast change; discover again\n")); tdssInternalRemovals(onePortContext->agRoot, onePortContext ); /* processed broadcast change */ onePortContext->discovery.SeenBC = agFALSE; if (tdsaAllShared->ResetInDiscovery != 0 && onePortContext->discovery.ResetTriggerred == agTRUE) { TI_DBG1(("tdsaSATADiscoverDone: tdsaBCTimer\n")); tdsaBCTimer(tiRoot, onePortContext); } else { tdsaDiscover( tiRoot, onePortContext, TDSA_DISCOVERY_TYPE_SAS, TDSA_DISCOVERY_OPTION_INCREMENTAL_START ); } } else { onePortContext->DiscoveryState = ITD_DSTATE_COMPLETED; if (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START) { if (flag == tiSuccess) { #ifdef AGTIAPI_CTL tdsaContext_t *tdsaAllShared = &((tdsaRoot_t*)tiRoot->tdData)->tdsaAllShared; if (tdsaAllShared->SASConnectTimeLimit) tdsaCTLSet(tiRoot, onePortContext, tiIntrEventTypeDiscovery, tiDiscOK); else #endif ostiInitiatorEvent( tiRoot, onePortContext->tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscOK, agNULL ); } else { TI_DBG1(("tdsaSATADiscoverDone: Error; clean up\n")); tdssDiscoveryErrorRemovals(onePortContext->agRoot, onePortContext ); ostiInitiatorEvent( tiRoot, onePortContext->tiPortalContext, agNULL, tiIntrEventTypeDiscovery, tiDiscFailed, agNULL ); } } else { if (flag == tiSuccess) { tdssReportChanges(onePortContext->agRoot, onePortContext ); } else { tdssReportRemovals(onePortContext->agRoot, onePortContext, agFALSE ); } } } #ifdef TBD /* ACKing BC */ tdsaAckBC(tiRoot, onePortContext); 
#endif return; } osGLOBAL void tdsaAckBC( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext ) { #ifdef TBD /* not yet */ agsaEventSource_t eventSource[TD_MAX_NUM_PHYS]; bit32 HwAckSatus = AGSA_RC_SUCCESS; int i; TI_DBG3(("tdsaAckBC: start\n")); for (i=0;iBCPhyID[i] == agTRUE) { /* saHwEventAck() */ eventSource[i].agPortContext = onePortContext->agPortContext; eventSource[i].event = OSSA_HW_EVENT_BROADCAST_CHANGE; /* phy ID */ eventSource[i].param = i; HwAckSatus = saHwEventAck( onePortContext->agRoot, agNULL, /* agContext */ 0, &eventSource[i], /* agsaEventSource_t */ 0, 0 ); TI_DBG3(("tdsaAckBC: calling saHwEventAck\n")); if ( HwAckSatus != AGSA_RC_SUCCESS) { TI_DBG1(("tdsaAckBC: failing in saHwEventAck; status %d\n", HwAckSatus)); return; } } onePortContext->BCPhyID[i] = agFALSE; } #endif } #ifdef SATA_ENABLE /***************************************************************************** *! \brief tdsaSATAFullDiscover * * Purpose: This function is called to trigger full SATA topology discovery * within a portcontext. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * * \return: * tiSuccess Discovery initiated. * tiError Discovery could not be initiated at this time. 
* * \note: * *****************************************************************************/ osGLOBAL bit32 tdsaSATAFullDiscover( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext ) { bit32 ret = tiSuccess; tdsaDeviceData_t *oneDeviceData = agNULL; bit32 deviceType; bit8 phyRate = SAS_CONNECTION_RATE_3_0G; bit32 i; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; // tdsaDeviceData_t *tdsaDeviceData = (tdsaDeviceData_t *)tdsaAllShared->DeviceMem; tdsaDeviceData_t *tdsaDeviceData; tdList_t *DeviceListList; TI_DBG3(("tdsaSATAFullDiscover: start\n")); if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaSATAFullDiscover: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return tiError; } phyRate = onePortContext->LinkRate; DeviceListList = tdsaAllShared->MainDeviceList.flink; tdsaDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); /* If port is SATA mode */ /* Native SATA mode is decided in ossaHWCB() SAS_LINK_UP or SATA_LINK_UP */ if (onePortContext->nativeSATAMode == agTRUE) { /* Decode device type */ deviceType = tdssSATADeviceTypeDecode(onePortContext->remoteSignature); /* Create a device descriptor for the SATA device attached to the port */ if ( deviceType == SATA_PM_DEVICE) { TI_DBG3(("tdsaSATAFullDiscover: Found a PM device\n")); oneDeviceData = tdsaPortSATADeviceAdd( tiRoot, onePortContext, agNULL, onePortContext->remoteSignature, agTRUE, 0xF, phyRate, agNULL, 0xFF ); } else { /* already added in ossahwcb() in SATA link up */ TI_DBG3(("tdsaSATAFullDiscover: Found a DIRECT SATA device\n")); } /* Process for different device type */ switch ( deviceType ) { /* if it's PM */ case SATA_PM_DEVICE: { TI_DBG3(("tdsaSATAFullDiscover: Process a PM device\n")); /* For each port of the PM */ for ( i = 0; i < SATA_MAX_PM_PORTS; i ++ ) { /* Read the signature */ /* Decode the device type */ /* Create device descriptor */ /* Callback with the 
discovered devices */ } break; } /* if it's ATA device */ case SATA_ATA_DEVICE: case SATA_ATAPI_DEVICE: { TI_DBG3(("tdsaSATAFullDiscover: Process an ATA device. Sending Identify Device cmd\n")); /* to-check: for this direct attached one, already added and do nothing */ /* no longer, discovery sends sata identify device command */ //tdsaSATAIdentifyDeviceCmdSend(tiRoot, oneDeviceData); tdsaSATADiscoverDone(tiRoot, onePortContext, tiSuccess); break; } /* Other devices */ default: { /* callback */ TI_DBG3(("siSATAFullDiscover: Process OTHER SATA device. Just report the device\n")); break; } } } /* If port is SAS mode */ else { TI_DBG3(("tdsaSATAFullDiscover: Discovering attached STP devices starts....\n")); oneDeviceData = tdsaFindRightDevice(tiRoot, onePortContext, tdsaDeviceData); tdsaDiscoveringStpSATADevice(tiRoot, onePortContext, oneDeviceData); } return ret; } /* adding only direct attached SATA such as PM Other directly attached SATA device such as disk is reported by ossahwcb() in link up used in sata native mode */ /***************************************************************************** *! \brief tdsaPortSATADeviceAdd * * Purpose: This function adds the SATA device to the device list. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneSTPBridge: STP bridge. * \param Signature: SATA signature. * \param pm: Port Multiplier. * \param pmField: Port Multiplier field. * \param connectionRate: Connection Rate. * * \return: * Pointer to device data. 
* * \note: * *****************************************************************************/ GLOBAL tdsaDeviceData_t * tdsaPortSATADeviceAdd( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaDeviceData_t *oneSTPBridge, bit8 *Signature, bit8 pm, bit8 pmField, bit8 connectionRate, tdsaDeviceData_t *oneExpDeviceData, bit8 phyID ) { tdsaDeviceData_t *oneDeviceData = agNULL; agsaRoot_t *agRoot = onePortContext->agRoot; bit8 dev_s_rate = 0; bit8 sasorsata = SATA_DEVICE_TYPE; // bit8 devicetype = 0; bit8 flag = 0; bit8 TLR = 0; tdsaDeviceData_t *oneAttachedExpDeviceData = agNULL; TI_DBG3(("tdsaPortSATADeviceAdd: start\n")); /* sanity check */ TD_ASSERT((agNULL != tiRoot), ""); TD_ASSERT((agNULL != agRoot), ""); TD_ASSERT((agNULL != onePortContext), ""); TD_ASSERT((agNULL != Signature), ""); oneDeviceData = tdssNewAddSATAToSharedcontext( tiRoot, agRoot, onePortContext, agNULL, Signature, pm, pmField, connectionRate, oneExpDeviceData, phyID ); if (oneDeviceData == agNULL) { TI_DBG1(("tdsaPortSATADeviceAdd: no more device!!! 
oneDeviceData is null\n")); return agNULL; } flag = (bit8)((phyID << 4) | TLR); DEVINFO_PUT_SMPTO(&oneDeviceData->agDeviceInfo, DEFAULT_SMP_TIMEOUT); DEVINFO_PUT_ITNEXUSTO(&oneDeviceData->agDeviceInfo, 0xFFF); DEVINFO_PUT_FBS(&oneDeviceData->agDeviceInfo, 0); DEVINFO_PUT_FLAG(&oneDeviceData->agDeviceInfo, flag); /* adjusting connectionRate */ oneAttachedExpDeviceData = oneDeviceData->ExpDevice; if (oneAttachedExpDeviceData != agNULL) { connectionRate = (bit8)(MIN(connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); TI_DBG3(("tdsaPortSATADeviceAdd: 1st connectionRate 0x%x DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo) 0x%x\n", connectionRate, DEVINFO_GET_LINKRATE(&oneAttachedExpDeviceData->agDeviceInfo))); } else { TI_DBG3(("tdsaPortSATADeviceAdd: 1st oneAttachedExpDeviceData is NULL\n")); } /* Device Type, SAS or SATA, connection rate; bit7 --- bit0*/ // dev_s_rate = dev_s_rate | (devicetype << 6); dev_s_rate = (bit8)(dev_s_rate | (sasorsata << 4)); dev_s_rate = (bit8)(dev_s_rate | connectionRate); DEVINFO_PUT_DEV_S_RATE(&oneDeviceData->agDeviceInfo, dev_s_rate); osti_memset(&oneDeviceData->agDeviceInfo.sasAddressHi, 0, 4); osti_memset(&oneDeviceData->agDeviceInfo.sasAddressLo, 0, 4); oneDeviceData->agContext.osData = oneDeviceData; oneDeviceData->agContext.sdkData = agNULL; TI_DBG1(("tdsaPortSATADeviceAdd: did %d\n", oneDeviceData->id)); if (oneDeviceData->registered == agFALSE) { TI_DBG2(("tdsaPortSATADeviceAdd: did %d\n", oneDeviceData->id)); saRegisterNewDevice( /* tdsaPortSATADeviceAdd */ onePortContext->agRoot, &oneDeviceData->agContext, tdsaRotateQnumber(tiRoot, oneDeviceData), &oneDeviceData->agDeviceInfo, onePortContext->agPortContext, 0 ); } return oneDeviceData; } #endif /***************************************************************************** *! \brief tdsaFindRightDevice * * Purpose: This function returns device-to-be processed. 
* * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param tdsaDeviceData: Pointer to the starting device data. * * \return: * Pointer to device data. * * \note: * *****************************************************************************/ osGLOBAL tdsaDeviceData_t * tdsaFindRightDevice( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaDeviceData_t *tdsaDeviceData ) { tdList_t *DeviceListList; tdsaDeviceData_t *oneDeviceData = agNULL; bit32 found = agFALSE; TI_DBG3(("tdsaFindHeadDevice: start\n")); DeviceListList = tdsaDeviceData->MainLink.flink; while (DeviceListList != &(tdsaDeviceData->MainLink)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); TI_DBG3(("tdsaFindRightDevice: did %d STP %d SATA %d \n", onePortContext->id, DEVICE_IS_STP_TARGET(oneDeviceData), DEVICE_IS_SATA_DEVICE(oneDeviceData))); DeviceListList = DeviceListList->flink; } DeviceListList = tdsaDeviceData->MainLink.flink; while (DeviceListList != &(tdsaDeviceData->MainLink)) { oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); if ((oneDeviceData->registered == agTRUE) && (oneDeviceData->tdPortContext == onePortContext) && (oneDeviceData->processed == agFALSE) && (SA_IDFRM_IS_STP_TARGET(&oneDeviceData->sasIdentify) || SA_IDFRM_IS_SATA_DEVICE(&oneDeviceData->sasIdentify)) ) { TI_DBG3(("tdsaFindRightDevice: pid %d did %d\n", onePortContext->id, oneDeviceData->id)); oneDeviceData->processed = agTRUE; found = agTRUE; break; } DeviceListList = DeviceListList->flink; } if (found == agTRUE) { return oneDeviceData; } else { return agNULL; } } // tdsaDeviceData is head of list /***************************************************************************** *! \brief tdsaDiscoveringStpSATADevice * * Purpose: For each device in the device list, this function peforms * SATA discovery. 
* * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneDeviceData: Pointer to the heade of device list. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaDiscoveringStpSATADevice( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, tdsaDeviceData_t *oneDeviceData ) { bit32 status; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; // tdsaDeviceData_t *tdsaDeviceData = (tdsaDeviceData_t *)tdsaAllShared->DeviceMem; tdsaDeviceData_t *tdsaDeviceData; tdList_t *DeviceListList; TI_DBG3(("tdsaDiscoveringStpSATADevice: start\n")); DeviceListList = tdsaAllShared->MainDeviceList.flink; tdsaDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList); if (oneDeviceData) { TI_DBG3(("tdsaDiscoveringStpSATADevice: Found STP-SATA Device=%p\n", oneDeviceData)); if ((SA_IDFRM_IS_SATA_DEVICE(&oneDeviceData->sasIdentify) || SA_IDFRM_IS_STP_TARGET(&oneDeviceData->sasIdentify)) && ((onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_FULL_START && oneDeviceData->valid == agTRUE) || (onePortContext->discovery.type == TDSA_DISCOVERY_OPTION_INCREMENTAL_START && oneDeviceData->valid2 == agTRUE)) && (oneDeviceData->tdPortContext == onePortContext) ) { /* if found an STP bridges */ /* in order to get sata signature and etc */ TI_DBG3(("tdsaDiscoveringStpSATADevice: sending report phy sata\n")); tdsaReportPhySataSend(tiRoot, oneDeviceData, oneDeviceData->sasIdentify.phyIdentifier); //send ID in every discovery? 
No if (oneDeviceData->satDevData.IDDeviceValid == agFALSE) { TI_DBG3(("tdsaDiscoveringStpSATADevice: sending identify device data\n")); /* all internal */ status = tdsaDiscoveryStartIDDev(tiRoot, agNULL, &(oneDeviceData->tiDeviceHandle), agNULL, oneDeviceData); if (status != tiSuccess) { /* identify device data is not valid */ TI_DBG1(("tdsaDiscoveringStpSATADevice: fail or busy %d\n", status)); oneDeviceData->satDevData.IDDeviceValid = agFALSE; } } } else { TI_DBG2(("tdsaDiscoveringStpSATADevice: moving to the next\n")); oneDeviceData = tdsaFindRightDevice(tiRoot, onePortContext, tdsaDeviceData); tdsaDiscoveringStpSATADevice(tiRoot, onePortContext, oneDeviceData); } } else { /* otherwise, there is no more SATA device found */ TI_DBG3(("tdsaDiscoveringStpSATADevice: No More Device; SATA discovery finished\n")); tdsaSATADiscoverDone(tiRoot, onePortContext, tiSuccess); } return; } /***************************************************************************** *! \brief tdsaSASIncrementalDiscover * * Purpose: This function is called to trigger incremental SAS topology discovery * within a portcontext. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * * \return: * tiSuccess Discovery initiated. * tiError Discovery could not be initiated at this time. 
* * \note: * *****************************************************************************/ osGLOBAL bit32 tdsaSASIncrementalDiscover( tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext ) { tdsaDeviceData_t *oneDeviceData = agNULL; int i,j; bit8 portMaxRate; TI_DBG3(("tdsaSASIncrementalDiscover: start\n")); if (onePortContext->valid == agFALSE) { TI_DBG1(("tdsaSASIncrementalDiscover: aborting discovery\n")); tdsaSASDiscoverAbort(tiRoot, onePortContext); return tiError; } onePortContext->DiscoveryState = ITD_DSTATE_STARTED; /* nativeSATAMode is set in ossaHwCB() in link up */ if (onePortContext->nativeSATAMode == agFALSE) /* default: SAS and SAS/SATA mode */ { if (SA_IDFRM_GET_DEVICETTYPE(&onePortContext->sasIDframe) == SAS_END_DEVICE && SA_IDFRM_IS_SSP_TARGET(&onePortContext->sasIDframe) ) { for(i=0;iPhyIDList[i] == agTRUE) { for (j=0;jagRoot, agNULL, tdsaRotateQnumber(tiRoot, agNULL), i, AGSA_PHY_NOTIFY_ENABLE_SPINUP, agNULL); } break; } } } /* add the device 1. add device in TD layer 2. call saRegisterNewDevice 3. update agDevHandle in ossaDeviceRegistrationCB() */ portMaxRate = onePortContext->LinkRate; oneDeviceData = tdsaPortSASDeviceAdd( tiRoot, onePortContext, onePortContext->sasIDframe, agFALSE, portMaxRate, IT_NEXUS_TIMEOUT, 0, SAS_DEVICE_TYPE, agNULL, 0xFF ); if (oneDeviceData) { if (oneDeviceData->registered == agFALSE) { /* set the timer and wait till the device(directly attached. eg Expander) to be registered. Then, in tdsaDeviceRegistrationTimerCB(), tdsaSASUpStreamDiscoverStart() is called */ tdsaDeviceRegistrationTimer(tiRoot, onePortContext, oneDeviceData); } else { tdsaSASUpStreamDiscoverStart(tiRoot, onePortContext, oneDeviceData); } } } else /* SATAOnlyMode*/ { tdsaSASDiscoverDone(tiRoot, onePortContext, tiSuccess); } return tiSuccess; } #ifdef SATA_ENABLE /* For the sake of completness; this is the same as tdsaSATAFullDiscover*/ /***************************************************************************** *! 
\brief  tdsaSATAIncrementalDiscover
*
*  Purpose:  This function is called to trigger incremental SATA topology
*            discovery within a portcontext.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*
*  \return:
*    tiSuccess    Discovery initiated.
*    tiError      Discovery could not be initiated at this time.
*
*  \note:
*
*****************************************************************************/
osGLOBAL bit32
tdsaSATAIncrementalDiscover(
                            tiRoot_t           *tiRoot,
                            tdsaPortContext_t  *onePortContext
                            )
{
  bit32                 ret = tiSuccess;
  tdsaDeviceData_t      *oneDeviceData = agNULL;
  bit32                 deviceType;
  bit8                  phyRate = SAS_CONNECTION_RATE_3_0G;
  bit32                 i;
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  //  tdsaDeviceData_t      *tdsaDeviceData = (tdsaDeviceData_t *)tdsaAllShared->DeviceMem;
  tdsaDeviceData_t      *tdsaDeviceData;
  tdList_t              *DeviceListList;

  TI_DBG3(("tdsaSATAIncrementalDiscover: start\n"));

  /* Port vanished while the request was queued: abort cleanly. */
  if (onePortContext->valid == agFALSE)
  {
    TI_DBG1(("tdsaSATAIncrementalDiscover: aborting discovery\n"));
    tdsaSASDiscoverAbort(tiRoot, onePortContext);
    return tiError;
  }

  /* Head of the shared device list; used to seed the STP device walk below. */
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  tdsaDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);

  /* If port is SATA mode */
  /* Native SATA mode is decided in ossaHWCB() SAS_LINK_UP or SATA_LINK_UP */
  if (onePortContext->nativeSATAMode == agTRUE)
  {
    /* Decode device type */
    deviceType = tdssSATADeviceTypeDecode(onePortContext->remoteSignature);
    /* Create a device descriptor for the SATA device attached to the port */
    if ( deviceType == SATA_PM_DEVICE)
    {
      TI_DBG3(("tdsaSATAIncrementalDiscover: Found a PM device\n"));
      oneDeviceData = tdsaPortSATADeviceAdd(
                                            tiRoot,
                                            onePortContext,
                                            agNULL,
                                            onePortContext->remoteSignature,
                                            agTRUE,
                                            0xF,   /* PM port field for the PM itself */
                                            phyRate,
                                            agNULL,
                                            0xFF);
    }
    else
    {
      /* already added in ossahwcb() in SATA link up */
      TI_DBG3(("tdsaSATAIncrementalDiscover: Found a DIRECT SATA device\n"));
    }

    /* Process for different device type */
    switch ( deviceType )
    {
      /* if it's PM */
      case SATA_PM_DEVICE:
      {
        TI_DBG3(("tdsaSATAIncrementalDiscover: Process a PM device\n"));
        /* For each port of the PM */
        /* NOTE(review): the per-PM-port enumeration below is an empty
           placeholder in the original driver — PM fan-out discovery was
           never implemented here. */
        for ( i = 0; i < SATA_MAX_PM_PORTS; i ++ )
        {
          /* Read the signature */
          /* Decode the device type */
          /* Create device descriptor */
          /* Callback with the discovered devices */
        }
        break;
      }
      /* if it's ATA device */
      case SATA_ATA_DEVICE:
      case SATA_ATAPI_DEVICE:
      {
        TI_DBG3(("tdsaSATAIncrementalDiscover: Process an ATA device. Sending Identify Device cmd\n"));
        /* to-check: for this direct attached one, already added and do nothing */
        /* no longer, discovery sends sata identify device command */
        //tdsaSATAIdentifyDeviceCmdSend(tiRoot, oneDeviceData);
        tdsaSATADiscoverDone(tiRoot, onePortContext, tiSuccess);
        break;
      }
      /* Other devices */
      default:
      {
        /* callback */
        TI_DBG3(("siSATAIncrementalDiscover: Process OTHER SATA device. Just report the device\n"));
        break;
      }
    }
  }
  /* If port is SAS mode */
  else
  {
    TI_DBG3(("tdsaSATAIncrementalDiscover: Discovering attached STP devices starts....\n"));
    oneDeviceData = tdsaFindRightDevice(tiRoot, onePortContext, tdsaDeviceData);

    tdsaDiscoveringStpSATADevice(tiRoot, onePortContext, oneDeviceData);
  }
  return ret;
}
#endif

/******************** SMP *******************************/

/*****************************************************************************
*! \brief  tdSMPStart
*
*  Purpose:  This function sends SMP request.
*
*  \param   tiRoot:        Pointer to the OS Specific module allocated tiRoot_t
*                          instance.
*  \param   agRoot:        Pointer to chip/driver Instance.
*  \param   oneDeviceData: Pointer to the device data.
*  \param   functionCode:  SMP function code.
*  \param   pSmpBody:      Pointer to SMP payload.
*  \param   smpBodySize:   Size of SMP request without SMP header.
* \param agRequestType: SPC-specfic request type * * \return: * tiSuccess SMP is sent successfully * tiError SMP is not sent successfully * * \note: * *****************************************************************************/ osGLOBAL bit32 tdSMPStart( tiRoot_t *tiRoot, agsaRoot_t *agRoot, tdsaDeviceData_t *oneDeviceData, bit32 functionCode, bit8 *pSmpBody, /* smp payload itself w/o first 4 bytes(header) */ bit32 smpBodySize, /* smp payload size w/o first 4 bytes(header) */ bit32 agRequestType, tiIORequest_t *CurrentTaskTag, bit32 queueNumber ) { void *osMemHandle; bit32 PhysUpper32; bit32 PhysLower32; bit32 memAllocStatus; bit32 expectedRspLen = 0; #ifdef REMOVED tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&(tdsaRoot->tdsaAllShared); #endif tdssSMPRequestBody_t *tdSMPRequestBody; agsaSASRequestBody_t *agSASRequestBody; agsaSMPFrame_t *agSMPFrame; agsaIORequest_t *agIORequest; agsaDevHandle_t *agDevHandle; tdssSMPFrameHeader_t tdSMPFrameHeader; tdsaPortContext_t *onePortContext = agNULL; bit32 status; #ifndef DIRECT_SMP void *IndirectSMPReqosMemHandle; bit32 IndirectSMPReqPhysUpper32; bit32 IndirectSMPReqPhysLower32; bit32 IndirectSMPReqmemAllocStatus; bit8 *IndirectSMPReq; void *IndirectSMPResposMemHandle; bit32 IndirectSMPRespPhysUpper32; bit32 IndirectSMPRespPhysLower32; bit32 IndirectSMPRespmemAllocStatus; bit8 *IndirectSMPResp; #endif TI_DBG3(("tdSMPStart: start\n")); TI_DBG3(("tdSMPStart: oneDeviceData %p\n", oneDeviceData)); TI_DBG3(("tdSMPStart: sasAddressHi 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSHI(&oneDeviceData->sasIdentify))); TI_DBG3(("tdSMPStart: sasAddressLo 0x%08x\n", SA_IDFRM_GET_SAS_ADDRESSLO(&oneDeviceData->sasIdentify))); TI_DBG3(("tdSMPStart: 2nd sasAddressHi 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); TI_DBG3(("tdSMPStart: 2nd sasAddressLo 0x%08x\n", oneDeviceData->SASAddressID.sasAddressLo)); onePortContext = oneDeviceData->tdPortContext; if (onePortContext != agNULL) { 
TI_DBG3(("tdSMPStart: pid %d\n", onePortContext->id)); /* increment the number of pending SMP */ onePortContext->discovery.pendingSMP++; } else { TI_DBG1(("tdSMPStart: Wrong!!! onePortContext is NULL\n")); return tiError; } memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&tdSMPRequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdssSMPRequestBody_t), agTRUE ); if (memAllocStatus != tiSuccess) { TI_DBG1(("tdSMPStart: ostiAllocMemory failed...\n")); return tiError; } if (tdSMPRequestBody == agNULL) { TI_DBG1(("tdSMPStart: ostiAllocMemory returned NULL tdSMPRequestBody\n")); return tiError; } /* saves mem handle for freeing later */ tdSMPRequestBody->osMemHandle = osMemHandle; /* saves tdsaDeviceData */ tdSMPRequestBody->tdDevice = oneDeviceData; /* saving port id */ tdSMPRequestBody->tdPortContext = onePortContext; agDevHandle = oneDeviceData->agDevHandle; /* save the callback funtion */ tdSMPRequestBody->SMPCompletionFunc = itdssSMPCompleted; /* in itdcb.c */ /* for simulate warm target reset */ tdSMPRequestBody->CurrentTaskTag = CurrentTaskTag; /* initializes the number of SMP retries */ tdSMPRequestBody->retries = 0; #ifdef TD_INTERNAL_DEBUG /* debugging */ TI_DBG4(("tdSMPStart: SMPRequestbody %p\n", tdSMPRequestBody)); TI_DBG4(("tdSMPStart: callback fn %p\n", tdSMPRequestBody->SMPCompletionFunc)); #endif agIORequest = &(tdSMPRequestBody->agIORequest); agIORequest->osData = (void *) tdSMPRequestBody; agIORequest->sdkData = agNULL; /* SALL takes care of this */ agSASRequestBody = &(tdSMPRequestBody->agSASRequestBody); agSMPFrame = &(agSASRequestBody->smpFrame); TI_DBG3(("tdSMPStart: agIORequest %p\n", agIORequest)); TI_DBG3(("tdSMPStart: SMPRequestbody %p\n", tdSMPRequestBody)); /* depending on functionCode, set expectedRspLen in smp */ switch (functionCode) { case SMP_REPORT_GENERAL: expectedRspLen = sizeof(smpRespReportGeneral_t) + 4; break; case SMP_REPORT_MANUFACTURE_INFORMATION: expectedRspLen = sizeof(smpRespReportManufactureInfo_t) + 4; 
break; case SMP_DISCOVER: expectedRspLen = sizeof(smpRespDiscover_t) + 4; break; case SMP_REPORT_PHY_ERROR_LOG: expectedRspLen = 32 - 4; break; case SMP_REPORT_PHY_SATA: expectedRspLen = sizeof(smpRespReportPhySata_t) + 4; break; case SMP_REPORT_ROUTING_INFORMATION: expectedRspLen = sizeof(smpRespReportRouteTable_t) + 4; break; case SMP_CONFIGURE_ROUTING_INFORMATION: expectedRspLen = 4; break; case SMP_PHY_CONTROL: expectedRspLen = 4; break; case SMP_PHY_TEST_FUNCTION: expectedRspLen = 4; break; case SMP_PMC_SPECIFIC: expectedRspLen = 4; break; default: expectedRspLen = 0; TI_DBG1(("tdSMPStart: error!!! undefined or unused smp function code 0x%x\n", functionCode)); return tiError; } if (tiIS_SPC(agRoot)) { #ifdef DIRECT_SMP /* direct SMP with 48 or less payload */ if ( (smpBodySize + 4) <= SMP_DIRECT_PAYLOAD_LIMIT) /* 48 */ { TI_DBG3(("tdSMPStart: DIRECT smp payload\n")); osti_memset(&tdSMPFrameHeader, 0, sizeof(tdssSMPFrameHeader_t)); osti_memset(tdSMPRequestBody->smpPayload, 0, SMP_DIRECT_PAYLOAD_LIMIT); /* SMP header */ tdSMPFrameHeader.smpFrameType = SMP_REQUEST; /* SMP request */ tdSMPFrameHeader.smpFunction = (bit8)functionCode; tdSMPFrameHeader.smpFunctionResult = 0; tdSMPFrameHeader.smpReserved = 0; osti_memcpy(tdSMPRequestBody->smpPayload, &tdSMPFrameHeader, 4); // osti_memcpy((tdSMPRequestBody->smpPayload)+4, pSmpBody, smpBodySize); osti_memcpy(&(tdSMPRequestBody->smpPayload[4]), pSmpBody, smpBodySize); /* direct SMP payload eg) REPORT_GENERAL, DISCOVER etc */ agSMPFrame->outFrameBuf = tdSMPRequestBody->smpPayload; agSMPFrame->outFrameLen = smpBodySize + 4; /* without last 4 byte crc */ /* to specify DIRECT SMP response */ agSMPFrame->inFrameLen = 0; /* temporary solution for T2D Combo*/ #if defined (INITIATOR_DRIVER) && defined (TARGET_DRIVER) /* force smp repsonse to be direct */ agSMPFrame->expectedRespLen = 0; #else agSMPFrame->expectedRespLen = expectedRspLen; #endif // tdhexdump("tdSMPStart", (bit8*)agSMPFrame->outFrameBuf, agSMPFrame->outFrameLen); 
// tdhexdump("tdSMPStart new", (bit8*)tdSMPRequestBody->smpPayload, agSMPFrame->outFrameLen); // tdhexdump("tdSMPStart - tdSMPRequestBody", (bit8*)tdSMPRequestBody, sizeof(tdssSMPRequestBody_t)); } else { TI_DBG3(("tdSMPStart: INDIRECT smp payload\n")); } #else /* indirect SMP */ /* allocate Direct SMP request payload */ IndirectSMPReqmemAllocStatus = ostiAllocMemory( tiRoot, &IndirectSMPReqosMemHandle, (void **)&IndirectSMPReq, &IndirectSMPReqPhysUpper32, &IndirectSMPReqPhysLower32, 8, smpBodySize + 4, agFALSE ); if (IndirectSMPReqmemAllocStatus != tiSuccess) { TI_DBG1(("tdSMPStart: ostiAllocMemory failed for indirect SMP request...\n")); return tiError; } if (IndirectSMPReq == agNULL) { TI_DBG1(("tdSMPStart: ostiAllocMemory returned NULL IndirectSMPReq\n")); return tiError; } /* allocate indirect SMP response payload */ IndirectSMPRespmemAllocStatus = ostiAllocMemory( tiRoot, &IndirectSMPResposMemHandle, (void **)&IndirectSMPResp, &IndirectSMPRespPhysUpper32, &IndirectSMPRespPhysLower32, 8, expectedRspLen, agFALSE ); if (IndirectSMPRespmemAllocStatus != tiSuccess) { TI_DBG1(("tdSMPStart: ostiAllocMemory failed for indirect SMP reponse...\n")); return tiError; } if (IndirectSMPResp == agNULL) { TI_DBG1(("tdSMPStart: ostiAllocMemory returned NULL IndirectSMPResp\n")); return tiError; } /* saves mem handle for freeing later */ tdSMPRequestBody->IndirectSMPReqosMemHandle = IndirectSMPReqosMemHandle; tdSMPRequestBody->IndirectSMPResposMemHandle = IndirectSMPResposMemHandle; /* saves Indirect SMP request/repsonse pointer and length for free them later */ tdSMPRequestBody->IndirectSMPReq = IndirectSMPReq; tdSMPRequestBody->IndirectSMPResp = IndirectSMPResp; tdSMPRequestBody->IndirectSMPReqLen = smpBodySize + 4; tdSMPRequestBody->IndirectSMPRespLen = expectedRspLen; /* fill in indirect SMP request fields */ TI_DBG3(("tdSMPStart: INDIRECT smp payload\n")); /* SMP request and response initialization */ osti_memset(&tdSMPFrameHeader, 0, sizeof(tdssSMPFrameHeader_t)); 
osti_memset(IndirectSMPReq, 0, smpBodySize + 4); osti_memset(IndirectSMPResp, 0, expectedRspLen); /* SMP request header */ tdSMPFrameHeader.smpFrameType = SMP_REQUEST; /* SMP request */ tdSMPFrameHeader.smpFunction = (bit8)functionCode; tdSMPFrameHeader.smpFunctionResult = 0; tdSMPFrameHeader.smpReserved = 0; osti_memcpy(IndirectSMPReq, &tdSMPFrameHeader, 4); osti_memcpy(IndirectSMPReq+4, pSmpBody, smpBodySize); /* Indirect SMP request */ agSMPFrame->outFrameBuf = agNULL; agSMPFrame->outFrameAddrUpper32 = IndirectSMPReqPhysUpper32; agSMPFrame->outFrameAddrLower32 = IndirectSMPReqPhysLower32; agSMPFrame->outFrameLen = smpBodySize + 4; /* without last 4 byte crc */ /* Indirect SMP response */ agSMPFrame->expectedRespLen = expectedRspLen; agSMPFrame->inFrameLen = expectedRspLen; /* without last 4 byte crc */ agSMPFrame->inFrameAddrUpper32 = IndirectSMPRespPhysUpper32; agSMPFrame->inFrameAddrLower32 = IndirectSMPRespPhysLower32; #endif } else /* SPCv controller */ { /* only direct mode for both request and response */ TI_DBG3(("tdSMPStart: DIRECT smp payload\n")); agSMPFrame->flag = 0; osti_memset(&tdSMPFrameHeader, 0, sizeof(tdssSMPFrameHeader_t)); osti_memset(tdSMPRequestBody->smpPayload, 0, SMP_DIRECT_PAYLOAD_LIMIT); /* SMP header */ tdSMPFrameHeader.smpFrameType = SMP_REQUEST; /* SMP request */ tdSMPFrameHeader.smpFunction = (bit8)functionCode; tdSMPFrameHeader.smpFunctionResult = 0; tdSMPFrameHeader.smpReserved = 0; osti_memcpy(tdSMPRequestBody->smpPayload, &tdSMPFrameHeader, 4); // osti_memcpy((tdSMPRequestBody->smpPayload)+4, pSmpBody, smpBodySize); osti_memcpy(&(tdSMPRequestBody->smpPayload[4]), pSmpBody, smpBodySize); /* direct SMP payload eg) REPORT_GENERAL, DISCOVER etc */ agSMPFrame->outFrameBuf = tdSMPRequestBody->smpPayload; agSMPFrame->outFrameLen = smpBodySize + 4; /* without last 4 byte crc */ /* to specify DIRECT SMP response */ agSMPFrame->inFrameLen = 0; /* temporary solution for T2D Combo*/ #if defined (INITIATOR_DRIVER) && defined (TARGET_DRIVER) 
/* force smp repsonse to be direct */ agSMPFrame->expectedRespLen = 0; #else agSMPFrame->expectedRespLen = expectedRspLen; #endif // tdhexdump("tdSMPStart", (bit8*)agSMPFrame->outFrameBuf, agSMPFrame->outFrameLen); // tdhexdump("tdSMPStart new", (bit8*)tdSMPRequestBody->smpPayload, agSMPFrame->outFrameLen); // tdhexdump("tdSMPStart - tdSMPRequestBody", (bit8*)tdSMPRequestBody, sizeof(tdssSMPRequestBody_t)); } if (agDevHandle == agNULL) { TI_DBG1(("tdSMPStart: !!! agDevHandle is NULL !!! \n")); return tiError; } tdSMPRequestBody->queueNumber = queueNumber; status = saSMPStart( agRoot, agIORequest, queueNumber, //tdsaAllShared->SMPQNum, //tdsaRotateQnumber(tiRoot, oneDeviceData), agDevHandle, agRequestType, agSASRequestBody, &ossaSMPCompleted ); if (status == AGSA_RC_SUCCESS) { /* start SMP timer */ if (functionCode == SMP_REPORT_GENERAL || functionCode == SMP_DISCOVER || functionCode == SMP_REPORT_PHY_SATA || functionCode == SMP_CONFIGURE_ROUTING_INFORMATION ) { tdsaDiscoverySMPTimer(tiRoot, onePortContext, functionCode, tdSMPRequestBody); } return tiSuccess; } else if (status == AGSA_RC_BUSY) { /* set timer */ if (functionCode == SMP_REPORT_GENERAL || functionCode == SMP_DISCOVER || functionCode == SMP_REPORT_PHY_SATA || functionCode == SMP_CONFIGURE_ROUTING_INFORMATION) { /* only for discovery related SMPs*/ tdsaSMPBusyTimer(tiRoot, onePortContext, oneDeviceData, tdSMPRequestBody); return tiSuccess; } else if (functionCode == SMP_PHY_CONTROL) { ostiFreeMemory( tiRoot, osMemHandle, sizeof(tdssSMPRequestBody_t) ); return tiBusy; } else { ostiFreeMemory( tiRoot, osMemHandle, sizeof(tdssSMPRequestBody_t) ); return tiBusy; } } else /* AGSA_RC_FAILURE */ { /* discovery failure or task management failure */ if (functionCode == SMP_REPORT_GENERAL || functionCode == SMP_DISCOVER || functionCode == SMP_REPORT_PHY_SATA || functionCode == SMP_CONFIGURE_ROUTING_INFORMATION) { tdsaSASDiscoverDone(tiRoot, onePortContext, tiError); } ostiFreeMemory( tiRoot, osMemHandle, 
sizeof(tdssSMPRequestBody_t) ); return tiError; } } #ifdef REMOVED /***************************************************************************** *! \brief tdsaFindLocalLinkRate * * Purpose: This function finds local link rate. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param tdsaPortStartInfo: Pointer to the port start information. * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL bit8 tdsaFindLocalLinkRate( tiRoot_t *tiRoot, tdsaPortStartInfo_t *tdsaPortStartInfo ) { bit8 ans = SAS_CONNECTION_RATE_3_0G; /* default */ bit32 phyProperties; phyProperties = tdsaPortStartInfo->agPhyConfig.phyProperties; TI_DBG3(("tdsaFindLocalLinkRate: start\n")); if (phyProperties & 0x4) { ans = SAS_CONNECTION_RATE_6_0G; } if (phyProperties & 0x2) { ans = SAS_CONNECTION_RATE_3_0G; } if (phyProperties & 0x1) { ans = SAS_CONNECTION_RATE_1_5G; } TI_DBG3(("tdsaFindLocalLinkRate: ans 0x%x\n", ans)); return ans; } #endif /***************************************************************************** *! \brief tdsaConfigureRouteTimer * * Purpose: This function sets timers for configuring routing of discovery and * its callback function. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneExpander: Pointer to the expander. * \param ptdSMPDiscoverResp: Pointer to SMP discover repsonse data. 
*
*  \return:
*    None
*
*  \note: called by tdsaDiscoverRespRcvd()
*
*****************************************************************************/
osGLOBAL void
tdsaConfigureRouteTimer(tiRoot_t                 *tiRoot,
                        tdsaPortContext_t        *onePortContext,
                        tdsaExpander_t           *oneExpander,
                        smpRespDiscover_t        *ptdSMPDiscoverResp
                        )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t            *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaConfigureRouteTimer: start\n"));
  TI_DBG1(("tdsaConfigureRouteTimer: pid %d\n", onePortContext->id));

  discovery = &(onePortContext->discovery);

  TI_DBG1(("tdsaConfigureRouteTimer: onePortContext %p oneExpander %p ptdSMPDiscoverResp %p\n", onePortContext, oneExpander, ptdSMPDiscoverResp));
  TI_DBG1(("tdsaConfigureRouteTimer: discovery %p \n", discovery));
  TI_DBG1(("tdsaConfigureRouteTimer: pid %d configureRouteRetries %d\n", onePortContext->id, discovery->configureRouteRetries));
  TI_DBG1(("tdsaConfigureRouteTimer: discovery->status %d\n", discovery->status));

  /* Re-arm: stop any timer already running before setting a new one. */
  if (discovery->configureRouteTimer.timerRunning == agTRUE)
  {
    tdsaKillTimer(
                  tiRoot,
                  &discovery->configureRouteTimer
                  );
  }

  TI_DBG1(("tdsaConfigureRouteTimer: UsecsPerTick %d\n", Initiator->OperatingOption.UsecsPerTick));
  TI_DBG1(("tdsaConfigureRouteTimer: Timervalue %d\n", CONFIGURE_ROUTE_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick));

  /* Timeout is converted from usecs to OS ticks; fires
     tdsaConfigureRouteTimerCB with the three contexts below. */
  tdsaSetTimerRequest(
                      tiRoot,
                      &discovery->configureRouteTimer,
                      CONFIGURE_ROUTE_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                      tdsaConfigureRouteTimerCB,
                      (void *)onePortContext,
                      (void *)oneExpander,
                      (void *)ptdSMPDiscoverResp
                      );

  tdsaAddTimer (
                tiRoot,
                &Initiator->timerlist,
                &discovery->configureRouteTimer
                );

  return;
}

/*****************************************************************************
*! \brief  tdsaConfigureRouteTimerCB
*
*  Purpose:  This function is callback function for tdsaConfigureRouteTimer.
*
*  \param   tiRoot:      Pointer to the OS Specific module allocated tiRoot_t
*                        instance.
*  \param   timerData1:  Pointer to timer-related data structure
*  \param   timerData2:  Pointer to timer-related data structure
*  \param   timerData3:  Pointer to timer-related data structure
*
*  \return:
*    None
*
*  \note: timerData1 carries the port context, timerData2 the expander,
*         timerData3 the saved SMP DISCOVER response (set up by
*         tdsaConfigureRouteTimer()).
*
*****************************************************************************/
osGLOBAL void
tdsaConfigureRouteTimerCB(
                          tiRoot_t  * tiRoot,
                          void      * timerData1,
                          void      * timerData2,
                          void      * timerData3
                          )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t            *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;
  tdsaPortContext_t     *onePortContext;
  tdsaExpander_t        *oneExpander;
  smpRespDiscover_t     *ptdSMPDiscoverResp;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaConfigureRouteTimerCB: start\n"));

  onePortContext = (tdsaPortContext_t *)timerData1;
  oneExpander = (tdsaExpander_t *)timerData2;
  ptdSMPDiscoverResp = (smpRespDiscover_t *)timerData3;

  discovery = &(onePortContext->discovery);

  TI_DBG1(("tdsaConfigureRouteTimerCB: onePortContext %p oneExpander %p ptdSMPDiscoverResp %p\n", onePortContext, oneExpander, ptdSMPDiscoverResp));
  TI_DBG1(("tdsaConfigureRouteTimerCB: discovery %p\n", discovery));
  TI_DBG1(("tdsaConfigureRouteTimerCB: pid %d configureRouteRetries %d\n", onePortContext->id, discovery->configureRouteRetries));
  TI_DBG1(("tdsaConfigureRouteTimerCB: discovery.status %d\n", discovery->status));

  discovery->configureRouteRetries++;
  if (discovery->configureRouteRetries >= DISCOVERY_RETRIES)
  {
    /* Retry budget exhausted: fail the whole discovery. */
    TI_DBG1(("tdsaConfigureRouteTimerCB: retries are over\n"));
    discovery->configureRouteRetries = 0;
    /* failed the discovery */
    tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
    if (discovery->configureRouteTimer.timerRunning == agTRUE)
    {
      tdsaKillTimer(
                    tiRoot,
                    &discovery->configureRouteTimer
                    );
    }
    return;
  }

  if (onePortContext->discovery.status == DISCOVERY_DOWN_STREAM)
  {
    /* Downstream phase reached: resume expander phy discovery with the
       saved DISCOVER response. */
    TI_DBG1(("tdsaConfigureRouteTimerCB: proceed by calling tdsaSASDownStreamDiscoverExpanderPhy\n"));
    tdhexdump("tdsaConfigureRouteTimerCB", (bit8*)ptdSMPDiscoverResp, sizeof(smpRespDiscover_t));
    discovery->configureRouteRetries = 0;

    tdsaSASDownStreamDiscoverExpanderPhy(tiRoot, onePortContext, oneExpander, ptdSMPDiscoverResp);
  }
  else
  {
    /* Not there yet: re-arm the same timer and try again later. */
    TI_DBG1(("tdsaConfigureRouteTimerCB: setting timer again\n"));
    /* set the timer again */
    tdsaSetTimerRequest(
                        tiRoot,
                        &discovery->configureRouteTimer,
                        CONFIGURE_ROUTE_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                        tdsaConfigureRouteTimerCB,
                        (void *)onePortContext,
                        (void *)oneExpander,
                        (void *)ptdSMPDiscoverResp
                        );

    tdsaAddTimer (
                  tiRoot,
                  &Initiator->timerlist,
                  &discovery->configureRouteTimer
                  );
  }
//  tdsaReportGeneralSend(tiRoot, oneDeviceData);
  return;
}

/*****************************************************************************
*! \brief  tdsaDiscoveryTimer
*
*  Purpose:  This function sets timers for discovery and its callback
*            function.
*
*  \param   tiRoot:          Pointer to the OS Specific module allocated tiRoot_t
*                            instance.
*  \param   onePortContext:  Pointer to the portal context instance.
*  \param   oneDeviceData:   Pointer to the device data.
*
*  \return:
*    None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaDiscoveryTimer(tiRoot_t                 *tiRoot,
                   tdsaPortContext_t        *onePortContext,
                   tdsaDeviceData_t         *oneDeviceData
                   )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t            *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaDiscoveryTimer: start\n"));
  TI_DBG1(("tdsaDiscoveryTimer: pid %d\n", onePortContext->id));

  discovery = &(onePortContext->discovery);

  /* Re-arm: stop any timer already running before setting a new one. */
  if (discovery->discoveryTimer.timerRunning == agTRUE)
  {
    tdsaKillTimer(
                  tiRoot,
                  &discovery->discoveryTimer
                  );
  }

  TI_DBG1(("tdsaDiscoveryTimer: UsecsPerTick %d\n", Initiator->OperatingOption.UsecsPerTick));
  TI_DBG1(("tdsaDiscoveryTimer: Timervalue %d\n", DISCOVERY_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick));

  /* When the timer fires, tdsaDiscoveryTimerCB resumes discovery for
     oneDeviceData (passed as timerData1). */
  tdsaSetTimerRequest(
                      tiRoot,
                      &discovery->discoveryTimer,
                      DISCOVERY_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                      tdsaDiscoveryTimerCB,
                      oneDeviceData,
                      agNULL,
                      agNULL
                      );

  tdsaAddTimer (
                tiRoot,
                &Initiator->timerlist,
                &discovery->discoveryTimer
                );

  return;
}

/*****************************************************************************
*! \brief  tdsaDiscoveryTimerCB
*
*  Purpose:  This function is callback function for discovery timer.
*
*  \param   tiRoot:    Pointer to the OS Specific module allocated tiRoot_t
*                      instance.
* \param timerData1: Pointer to timer-related data structure * \param timerData2: Pointer to timer-related data structure * \param timerData3: Pointer to timer-related data structure * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaDiscoveryTimerCB( tiRoot_t * tiRoot, void * timerData1, void * timerData2, void * timerData3 ) { tdsaDeviceData_t *oneDeviceData; oneDeviceData = (tdsaDeviceData_t *)timerData1; TI_DBG1(("tdsaDiscoveryTimerCB: start\n")); if (oneDeviceData->registered == agTRUE) { TI_DBG1(("tdsaDiscoveryTimerCB: resumes discovery\n")); tdsaReportGeneralSend(tiRoot, oneDeviceData); } return; } /***************************************************************************** *! \brief tdsaDeviceRegistrationTimer * * Purpose: This function sets timers for device registration in discovery * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param oneDeviceData: Pointer to the device data. 
*  \return:
*    None
*
*  \note: called by tdsaSASFullDiscover() or tdsaSASIncrementalDiscover()
*         or tdsaDeviceRegistrationTimerCB()
*
*****************************************************************************/
osGLOBAL void
tdsaDeviceRegistrationTimer(tiRoot_t                 *tiRoot,
                            tdsaPortContext_t        *onePortContext,
                            tdsaDeviceData_t         *oneDeviceData
                            )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t            *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaDeviceRegistrationTimer: start\n"));
  TI_DBG1(("tdsaDeviceRegistrationTimer: pid %d\n", onePortContext->id));

  discovery = &(onePortContext->discovery);

  /* Re-arm: stop any timer already running before setting a new one. */
  if (discovery->deviceRegistrationTimer.timerRunning == agTRUE)
  {
    tdsaKillTimer(
                  tiRoot,
                  &discovery->deviceRegistrationTimer
                  );
  }

  TI_DBG1(("tdsaDeviceRegistrationTimer: UsecsPerTick %d\n", Initiator->OperatingOption.UsecsPerTick));
  TI_DBG1(("tdsaDeviceRegistrationTimer: Timervalue %d\n", DEVICE_REGISTRATION_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick));

  /* CB gets the port context (timerData1) and the device being waited on
     (timerData2). */
  tdsaSetTimerRequest(
                      tiRoot,
                      &discovery->deviceRegistrationTimer,
                      DEVICE_REGISTRATION_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                      tdsaDeviceRegistrationTimerCB,
                      onePortContext,
                      oneDeviceData,
                      agNULL
                      );

  tdsaAddTimer (
                tiRoot,
                &Initiator->timerlist,
                &discovery->deviceRegistrationTimer
                );

  return;
}

/*****************************************************************************
*! \brief  tdsaDeviceRegistrationTimerCB
*
*  Purpose:  This function is callback function for tdsaDeviceRegistrationTimer.
*
*  \param   tiRoot:    Pointer to the OS Specific module allocated tiRoot_t
*                      instance.
*  \param   timerData1:  Pointer to timer-related data structure
*  \param   timerData2:  Pointer to timer-related data structure
*  \param   timerData3:  Pointer to timer-related data structure
*
*  \return:
*    None
*
*  \note: timerData1 carries the port context, timerData2 the device whose
*         registration is awaited.
*
*****************************************************************************/
osGLOBAL void
tdsaDeviceRegistrationTimerCB(
                              tiRoot_t  * tiRoot,
                              void      * timerData1,
                              void      * timerData2,
                              void      * timerData3
                              )
{
  tdsaPortContext_t     *onePortContext;
  tdsaDeviceData_t      *oneDeviceData;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaDeviceRegistrationTimerCB: start\n"));

  onePortContext = (tdsaPortContext_t *)timerData1;
  oneDeviceData = (tdsaDeviceData_t *)timerData2;
  discovery = &(onePortContext->discovery);

  if (oneDeviceData->registered == agFALSE)
  {
    /* Device still not registered: retry up to DISCOVERY_RETRIES times.
       NOTE: "deviceRetistrationRetries" is the field's actual (misspelled)
       name in the project struct. */
    discovery->deviceRetistrationRetries++;
    if (discovery->deviceRetistrationRetries >= DISCOVERY_RETRIES)
    {
      TI_DBG1(("tdsaDeviceRegistrationTimerCB: retries are over\n"));
      discovery->deviceRetistrationRetries = 0;
      /* failed the discovery */
      tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
      if (discovery->deviceRegistrationTimer.timerRunning == agTRUE)
      {
        tdsaKillTimer(
                      tiRoot,
                      &discovery->deviceRegistrationTimer
                      );
      }
    }
    else
    {
      TI_DBG1(("tdsaDeviceRegistrationTimerCB: keep retrying\n"));
      /* start timer for device registration */
      tdsaDeviceRegistrationTimer(tiRoot, onePortContext, oneDeviceData);
    }
  }
  else
  {
    /* go ahead; continue the discovery */
    discovery->deviceRetistrationRetries = 0;
    tdsaSASUpStreamDiscoverStart(tiRoot, onePortContext, oneDeviceData);
  }
}

/*****************************************************************************
*! \brief  tdsaSMPBusyTimer
*
*  Purpose:  This function sets timers for busy of saSMPStart.
*
*  \param   tiRoot:            Pointer to the OS Specific module allocated
*                              tiRoot_t instance.
*  \param   onePortContext:    Pointer to the portal context instance.
*  \param   oneDeviceData:     Pointer to the device data.
*  \param   tdSMPRequestBody:  Pointer to the SMP request body.
*
*  \return:
*    None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaSMPBusyTimer(tiRoot_t                 *tiRoot,
                 tdsaPortContext_t        *onePortContext,
                 tdsaDeviceData_t         *oneDeviceData,
                 tdssSMPRequestBody_t     *tdSMPRequestBody
                 )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t            *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaSMPBusyTimer: start\n"));
  TI_DBG1(("tdsaSMPBusyTimer: pid %d\n", onePortContext->id));

  discovery = &(onePortContext->discovery);

  /* Re-arm: stop any timer already running before setting a new one. */
  if (discovery->SMPBusyTimer.timerRunning == agTRUE)
  {
    tdsaKillTimer(
                  tiRoot,
                  &discovery->SMPBusyTimer
                  );
  }

  /* CB retries saSMPStart with the saved request body (timerData3). */
  tdsaSetTimerRequest(
                      tiRoot,
                      &discovery->SMPBusyTimer,
                      SMP_BUSY_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                      tdsaSMPBusyTimerCB,
                      onePortContext,
                      oneDeviceData,
                      tdSMPRequestBody
                      );

  tdsaAddTimer (
                tiRoot,
                &Initiator->timerlist,
                &discovery->SMPBusyTimer
                );

  return;
}

/*****************************************************************************
*! \brief  tdsaSMPBusyTimerCB
*
*  Purpose:  This function is callback function for SMP busy timer.
*
*  \param   tiRoot:    Pointer to the OS Specific module allocated tiRoot_t
*                      instance.
*  \param   timerData1:  Pointer to timer-related data structure
*  \param   timerData2:  Pointer to timer-related data structure
*  \param   timerData3:  Pointer to timer-related data structure
*
*  \return:
*    None
*
*  \note: timerData1 is the port context, timerData2 the device, timerData3
*         the pending SMP request body to retry.
*
*****************************************************************************/
osGLOBAL void
tdsaSMPBusyTimerCB(
                   tiRoot_t  * tiRoot,
                   void      * timerData1,
                   void      * timerData2,
                   void      * timerData3
                   )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&(tdsaRoot->tdsaAllShared);
  agsaRoot_t            *agRoot;
  tdsaPortContext_t     *onePortContext;
  tdsaDeviceData_t      *oneDeviceData;
  tdssSMPRequestBody_t  *tdSMPRequestBody;
  agsaSASRequestBody_t  *agSASRequestBody;
  agsaIORequest_t       *agIORequest;
  agsaDevHandle_t       *agDevHandle;
  tdsaDiscovery_t       *discovery;
  bit32                 status = AGSA_RC_FAILURE;

  TI_DBG1(("tdsaSMPBusyTimerCB: start\n"));

  onePortContext = (tdsaPortContext_t *)timerData1;
  oneDeviceData = (tdsaDeviceData_t *)timerData2;
  tdSMPRequestBody = (tdssSMPRequestBody_t *)timerData3;
  agRoot = oneDeviceData->agRoot;
  agIORequest = &(tdSMPRequestBody->agIORequest);
  agDevHandle = oneDeviceData->agDevHandle;
  agSASRequestBody = &(tdSMPRequestBody->agSASRequestBody);
  discovery = &(onePortContext->discovery);

  discovery->SMPRetries++;

  /* Retry the SMP while under the budget; 'status' stays AGSA_RC_FAILURE
     when the budget is already exhausted, but that case is distinguished by
     the SMPRetries check in the else-branch below. */
  if (discovery->SMPRetries < SMP_BUSY_RETRIES)
  {
    status = saSMPStart(
                        agRoot,
                        agIORequest,
                        tdsaAllShared->SMPQNum, //tdsaRotateQnumber(tiRoot, oneDeviceData),
                        agDevHandle,
                        AGSA_SMP_INIT_REQ,
                        agSASRequestBody,
                        &ossaSMPCompleted
                        );
  }

  if (status == AGSA_RC_SUCCESS)
  {
    /* Retry accepted by hardware: reset counter and stop the timer. */
    discovery->SMPRetries = 0;
    if (discovery->SMPBusyTimer.timerRunning == agTRUE)
    {
      tdsaKillTimer(
                    tiRoot,
                    &discovery->SMPBusyTimer
                    );
    }
  }
  else if (status == AGSA_RC_FAILURE)
  {
    /* Hard failure (or retry budget already used up): fail discovery. */
    discovery->SMPRetries = 0;
    tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
    if (discovery->SMPBusyTimer.timerRunning == agTRUE)
    {
      tdsaKillTimer(
                    tiRoot,
                    &discovery->SMPBusyTimer
                    );
    }
  }
  else /* AGSA_RC_BUSY */
  {
    if (discovery->SMPRetries >= SMP_BUSY_RETRIES)
    {
      /* done with retris; give up */
      TI_DBG1(("tdsaSMPBusyTimerCB: retries are over\n"));
      discovery->SMPRetries = 0;
      tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
      if (discovery->SMPBusyTimer.timerRunning == agTRUE)
      {
        tdsaKillTimer(
                      tiRoot,
                      &discovery->SMPBusyTimer
                      );
      }
    }
    else
    {
      /* keep retrying */
      tdsaSMPBusyTimer(tiRoot, onePortContext, oneDeviceData, tdSMPRequestBody);
    }
  }

  return;
}

/*****************************************************************************
*! \brief  tdsaBCTimer
*
*  Purpose:  This function sets timers for sending ID device data only for
*            directly attached SATA device.
*
*  \param   tiRoot:            Pointer to the OS Specific module allocated
*                              tiRoot_t instance.
*  \param   onePortContext:    Pointer to the portal context instance.
*  \param   oneDeviceData:     Pointer to the device data.
*  \param   tdSMPRequestBody:  Pointer to the SMP request body.
*
*  \return:
*    None
*
*  \note:
*
*****************************************************************************/
osGLOBAL void
tdsaBCTimer(tiRoot_t                 *tiRoot,
            tdsaPortContext_t        *onePortContext
            )
{
  tdsaRoot_t            *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t         *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  itdsaIni_t            *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni;
  tdsaDiscovery_t       *discovery;

  TI_DBG1(("tdsaBCTimer: start\n"));

  discovery = &(onePortContext->discovery);

  /* Re-arm: stop any timer already running before setting a new one. */
  if (discovery->BCTimer.timerRunning == agTRUE)
  {
    tdsaKillTimer(
                  tiRoot,
                  &discovery->BCTimer
                  );
  }

  /* Only arm the timer while the port is still valid. */
  if (onePortContext->valid == agTRUE)
  {
    tdsaSetTimerRequest(
                        tiRoot,
                        &discovery->BCTimer,
                        BC_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick,
                        tdsaBCTimerCB,
                        onePortContext,
                        agNULL,
                        agNULL
                        );

    tdsaAddTimer(
                 tiRoot,
                 &Initiator->timerlist,
                 &discovery->BCTimer
                 );
  }

  return;
}

/*****************************************************************************
*! \brief  tdsaBCTimerCB
*
*  Purpose:  This function is callback function for SATA ID device data.
*
*  \param   tiRoot:    Pointer to the OS Specific module allocated tiRoot_t
*                      instance.
* \param timerData1: Pointer to timer-related data structure * \param timerData2: Pointer to timer-related data structure * \param timerData3: Pointer to timer-related data structure * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaBCTimerCB( tiRoot_t * tiRoot, void * timerData1, void * timerData2, void * timerData3 ) { tdsaPortContext_t *onePortContext; tdsaDiscovery_t *discovery; TI_DBG1(("tdsaBCTimerCB: start\n")); onePortContext = (tdsaPortContext_t *)timerData1; discovery = &(onePortContext->discovery); discovery->ResetTriggerred = agFALSE; if (onePortContext->valid == agTRUE) { tdsaDiscover( tiRoot, onePortContext, TDSA_DISCOVERY_TYPE_SAS, TDSA_DISCOVERY_OPTION_INCREMENTAL_START ); } if (discovery->BCTimer.timerRunning == agTRUE) { tdsaKillTimer( tiRoot, &discovery->BCTimer ); } return; } /***************************************************************************** *! \brief tdsaDiscoverySMPTimer * * Purpose: This function sets timers for sending discovery-related SMP * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. * \param onePortContext: Pointer to the portal context instance. * \param functionCode: SMP function. * \param tdSMPRequestBody: Pointer to the SMP request body. 
* * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaDiscoverySMPTimer(tiRoot_t *tiRoot, tdsaPortContext_t *onePortContext, bit32 functionCode, /* smp function code */ tdssSMPRequestBody_t *tdSMPRequestBody ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; itdsaIni_t *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni; tdsaDiscovery_t *discovery; TI_DBG3(("tdsaDiscoverySMPTimer: start\n")); TI_DBG3(("tdsaDiscoverySMPTimer: pid %d SMPFn 0x%x\n", onePortContext->id, functionCode)); /* start the SMP timer which works as SMP application timer */ discovery = &(onePortContext->discovery); if (discovery->DiscoverySMPTimer.timerRunning == agTRUE) { tdsaKillTimer( tiRoot, &discovery->DiscoverySMPTimer ); } tdsaSetTimerRequest( tiRoot, &discovery->DiscoverySMPTimer, SMP_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick, tdsaDiscoverySMPTimerCB, onePortContext, tdSMPRequestBody, agNULL ); tdsaAddTimer ( tiRoot, &Initiator->timerlist, &discovery->DiscoverySMPTimer ); return; } /***************************************************************************** *! \brief tdsaDiscoverySMPTimerCB * * Purpose: This function is callback function for tdsaDiscoverySMPTimer. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. 
 * \param   timerData1: Pointer to timer-related data structure
 *                      (cast to tdsaPortContext_t * by this callback)
 * \param   timerData2: Pointer to timer-related data structure
 *                      (cast to tdssSMPRequestBody_t * by this callback)
 * \param   timerData3: Pointer to timer-related data structure (unused)
 *
 * \return:
 *          None
 *
 * \note:
 *
 *****************************************************************************/
/* Fires when a discovery-related SMP did not complete in time: fails the
 * SAS or SATA discovery depending on the SMP function, and, if a broadcast
 * change was seen, also issues an abort for the outstanding SMP. */
osGLOBAL void
tdsaDiscoverySMPTimerCB(
                        tiRoot_t * tiRoot,
                        void * timerData1,
                        void * timerData2,
                        void * timerData3
                        )
{
  agsaRoot_t            *agRoot;
  tdsaPortContext_t     *onePortContext;
  bit8                  SMPFunction;
#ifndef DIRECT_SMP
  tdssSMPFrameHeader_t  *tdSMPFrameHeader;
  bit8                  smpHeader[4];
#endif
  tdssSMPRequestBody_t  *tdSMPRequestBody;
  tdsaDiscovery_t       *discovery;
  tdsaDeviceData_t      *oneDeviceData;
  agsaIORequest_t       *agAbortIORequest = agNULL;
  tdIORequestBody_t     *tdAbortIORequestBody = agNULL;
  bit32                 PhysUpper32;
  bit32                 PhysLower32;
  bit32                 memAllocStatus;
  void                  *osMemHandle;
  agsaIORequest_t       *agToBeAbortIORequest = agNULL;

  TI_DBG1(("tdsaDiscoverySMPTimerCB: start\n"));

  /* no retry if discovery related SMP, fail the discovery else ....
     be sure to abort SMP */
  onePortContext = (tdsaPortContext_t *)timerData1;
  tdSMPRequestBody = (tdssSMPRequestBody_t *)timerData2;
  discovery = &(onePortContext->discovery);
  oneDeviceData = tdSMPRequestBody->tdDevice;
  agToBeAbortIORequest = &(tdSMPRequestBody->agIORequest);
  agRoot = oneDeviceData->agRoot;

#ifdef DIRECT_SMP
  /* direct SMP: the function code is byte 1 of the in-line payload */
  SMPFunction = tdSMPRequestBody->smpPayload[1];
#else
  /* indirect SMP: read the 4-byte SMP header out of the response frame */
  saFrameReadBlock(agRoot, tdSMPRequestBody->IndirectSMPResp, 0, smpHeader, 4);
  tdSMPFrameHeader = (tdssSMPFrameHeader_t *)smpHeader;
  SMPFunction = tdSMPFrameHeader->smpFunction;
#endif

  TI_DBG1(("tdsaDiscoverySMPTimerCB: SMP function 0x%x\n", SMPFunction));

  /* stop the SMP application timer before acting on the timeout */
  if (discovery->DiscoverySMPTimer.timerRunning == agTRUE)
  {
    tdsaKillTimer(
                  tiRoot,
                  &discovery->DiscoverySMPTimer
                  );
  }

  switch (SMPFunction)
  {
  case SMP_REPORT_GENERAL: /* fall through */
  case SMP_DISCOVER:  /* fall through */
  case SMP_CONFIGURE_ROUTING_INFORMATION:  /* fall through */
    /* SAS-side discovery SMP timed out: fail the SAS discovery and return
       without attempting an abort */
    TI_DBG1(("tdsaDiscoverySMPTimerCB: failing discovery, SMP function 0x%x\n", SMPFunction));
    tdsaSASDiscoverDone(tiRoot, onePortContext, tiError);
    return;
  case SMP_REPORT_PHY_SATA:
    /* SATA-side discovery SMP timed out: fail the SATA discovery, then
       fall out of the switch so the SMP can still be aborted below */
    TI_DBG1(("tdsaDiscoverySMPTimerCB: failing discovery, SMP function SMP_REPORT_PHY_SATA\n"));
    tdsaSATADiscoverDone(tiRoot, onePortContext, tiError);
    break;
  default:
    /* do nothing */
    TI_DBG1(("tdsaDiscoverySMPTimerCB: Error!!!! not allowed case\n"));
    break;
  }

  if (onePortContext->discovery.SeenBC == agTRUE)
  {
    /* allocating agIORequest for abort itself */
    memAllocStatus = ostiAllocMemory(
                                     tiRoot,
                                     &osMemHandle,
                                     (void **)&tdAbortIORequestBody,
                                     &PhysUpper32,
                                     &PhysLower32,
                                     8,
                                     sizeof(tdIORequestBody_t),
                                     agTRUE
                                     );
    if (memAllocStatus != tiSuccess)
    {
      /* let os process IO */
      TI_DBG1(("tdsaDiscoverySMPTimerCB: ostiAllocMemory failed...\n"));
      return;
    }
    if (tdAbortIORequestBody == agNULL)
    {
      /* let os process IO */
      TI_DBG1(("tdsaDiscoverySMPTimerCB: ostiAllocMemory returned NULL tdAbortIORequestBody\n"));
      return;
    }
    /* setup task management structure */
    tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
    /* setting callback */
    tdAbortIORequestBody->IOCompletionFunc = itdssIOAbortedHandler;
    tdAbortIORequestBody->tiDevHandle = (tiDeviceHandle_t *)&(oneDeviceData->tiDeviceHandle);
    /* initialize agIORequest */
    agAbortIORequest = &(tdAbortIORequestBody->agIORequest);
    agAbortIORequest->osData = (void *) tdAbortIORequestBody;
    agAbortIORequest->sdkData = agNULL; /* LL takes care of this */

    /* SMPAbort - abort one */
    saSMPAbort(agRoot,
               agAbortIORequest,
               0,
               oneDeviceData->agDevHandle,
               0, /* abort one */
               agToBeAbortIORequest,
               agNULL
               );
  }
  return;
}

/*****************************************************************************
*! \brief  tdsaSATAIDDeviceTimer
*
*  Purpose: This function sets timers for sending ID device data only for
*           directly attached SATA device.
*
*  \param   tiRoot:           Pointer to the OS Specific module allocated
*                             tiRoot_t instance.
*  \param   onePortContext:   Pointer to the portal context instance.
*  \param   oneDeviceData:    Pointer to the device data.
*  \param   tdSMPRequestBody: Pointer to the SMP request body.
* * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSATAIDDeviceTimer(tiRoot_t *tiRoot, tdsaDeviceData_t *oneDeviceData ) { tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; itdsaIni_t *Initiator = (itdsaIni_t *)tdsaAllShared->itdsaIni; TI_DBG1(("tdsaSATAIDDeviceTimer: start\n")); if (oneDeviceData->SATAIDDeviceTimer.timerRunning == agTRUE) { tdsaKillTimer( tiRoot, &oneDeviceData->SATAIDDeviceTimer ); } tdsaSetTimerRequest( tiRoot, &oneDeviceData->SATAIDDeviceTimer, SATA_ID_DEVICE_DATA_TIMER_VALUE/Initiator->OperatingOption.UsecsPerTick, tdsaSATAIDDeviceTimerCB, oneDeviceData, agNULL, agNULL ); tdsaAddTimer ( tiRoot, &Initiator->timerlist, &oneDeviceData->SATAIDDeviceTimer ); return; } /***************************************************************************** *! \brief tdsaSATAIDDeviceTimerCB * * Purpose: This function is callback function for SATA ID device data. * * \param tiRoot: Pointer to the OS Specific module allocated tiRoot_t * instance. 
* \param timerData1: Pointer to timer-related data structure * \param timerData2: Pointer to timer-related data structure * \param timerData3: Pointer to timer-related data structure * * \return: * None * * \note: * *****************************************************************************/ osGLOBAL void tdsaSATAIDDeviceTimerCB( tiRoot_t * tiRoot, void * timerData1, void * timerData2, void * timerData3 ) { tdsaDeviceData_t *oneDeviceData; TI_DBG1(("tdsaSATAIDDeviceTimerCB: start\n")); oneDeviceData = (tdsaDeviceData_t *)timerData1; /* send identify device data */ tdssSubAddSATAToSharedcontext(tiRoot, oneDeviceData); if (oneDeviceData->SATAIDDeviceTimer.timerRunning == agTRUE) { tdsaKillTimer( tiRoot, &oneDeviceData->SATAIDDeviceTimer ); } return; } #endif /* TD_DISCOVER */ Index: head/sys/dev/pms/RefTisa/tisa/sassata/sata/host/sat.c =================================================================== --- head/sys/dev/pms/RefTisa/tisa/sassata/sata/host/sat.c (revision 359440) +++ head/sys/dev/pms/RefTisa/tisa/sassata/sata/host/sat.c (revision 359441) @@ -1,23309 +1,23309 @@ /******************************************************************************* *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the *following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided *with the distribution. * *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED *WARRANTIES,INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT *NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR *BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ********************************************************************************/ /*****************************************************************************/ /** \file * * The file implementing SCSI/ATA Translation (SAT). * The routines in this file are independent from HW LL API. * */ /*****************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #ifdef SATA_ENABLE #include #include #include #include #include #include #include #ifdef FDS_SM #include #include #include #endif #ifdef FDS_DM #include #include #include #endif #include #include #include #ifdef INITIATOR_DRIVER #include #include #include #endif #ifdef TARGET_DRIVER #include #include #include #endif #include #include #include #include /***************************************************************************** *! \brief satIOStart * * This routine is called to initiate a new SCSI request to SATL. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return: * * \e tiSuccess: I/O request successfully initiated. * \e tiBusy: No resources available, try again later. * \e tiIONoDevice: Invalid device handle. 
 *  \e tiError:      Other errors that prevent the I/O request to be started.
 *
 *
 *****************************************************************************/
/* Entry point of the SATL: validates the incoming SCSI command (LUN, device
 * state), routes ATAPI devices to satPacket/satReportLun, and otherwise
 * dispatches on the CDB opcode to the matching sat*() translation routine. */
GLOBAL bit32  satIOStart(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext
                  )
{
  bit32             retVal = tiSuccess;
  satDeviceData_t   *pSatDevData;
  scsiRspSense_t    *pSense;
  tiIniScsiCmnd_t   *scsiCmnd;
  tiLUN_t           *pLun;
  satInternalIo_t   *pSatIntIo;
#ifdef  TD_DEBUG_ENABLE
  /* NOTE(review): oneDeviceData is referenced inside TI_DBG1() below even
     when TD_DEBUG_ENABLE is not defined; presumably TI_DBG1 compiles out
     in that configuration — confirm against the build options. */
  tdsaDeviceData_t  *oneDeviceData;
#endif

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pLun        = &scsiCmnd->lun;

  /*
   * Reject all other LUN other than LUN 0.
   */
  if ( ((pLun->lun[0] | pLun->lun[1] | pLun->lun[2] | pLun->lun[3] |
         pLun->lun[4] | pLun->lun[5] | pLun->lun[6] | pLun->lun[7] ) != 0) &&
        (scsiCmnd->cdb[0] != SCSIOPC_INQUIRY)
     )
  {
    TI_DBG1(("satIOStart: *** REJECT *** LUN not zero, cdb[0]=0x%x tiIORequest=%p tiDeviceHandle=%p\n",
                 scsiCmnd->cdb[0], tiIORequest, tiDeviceHandle));

    /* complete with CHECK CONDITION / ILLEGAL REQUEST instead of starting */
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_LOGICAL_NOT_SUPPORTED,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    retVal = tiSuccess;
    goto ext;
  }

  TI_DBG6(("satIOStart: satPendingIO %d satNCQMaxIO %d\n",pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO ));

  /* this may happen after tiCOMReset until OS sends inquiry */
  if (pSatDevData->IDDeviceValid == agFALSE && (scsiCmnd->cdb[0] != SCSIOPC_INQUIRY))
  {
#ifdef  TD_DEBUG_ENABLE
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
#endif
    TI_DBG1(("satIOStart: invalid identify device data did %d\n", oneDeviceData->id));
    retVal = tiIONoDevice;
    goto ext;
  }

  /*
   * Check if we need to return BUSY, i.e. recovery in progress
   */
  if (pSatDevData->satDriveState == SAT_DEV_STATE_IN_RECOVERY)
  {
#ifdef  TD_DEBUG_ENABLE
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
#endif
    TI_DBG1(("satIOStart: IN RECOVERY STATE cdb[0]=0x%x tiIORequest=%p tiDeviceHandle=%p\n",
                 scsiCmnd->cdb[0], tiIORequest, tiDeviceHandle));
    TI_DBG1(("satIOStart: IN RECOVERY STATE did %d\n", oneDeviceData->id));
    TI_DBG1(("satIOStart: device %p satPendingIO %d satNCQMaxIO %d\n",pSatDevData, pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO ));
    TI_DBG1(("satIOStart: device %p satPendingNCQIO %d satPendingNONNCQIO %d\n",pSatDevData, pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO));
    retVal = tiError;
    goto ext;
    // return tiBusy;
  }

  if (pSatDevData->satDeviceType == SATA_ATAPI_DEVICE)
  {
     /* ATAPI device: only REPORT LUNS is emulated; everything else goes out
        as an ATAPI PACKET command */
     if (scsiCmnd->cdb[0] == SCSIOPC_REPORT_LUN)
     {
        return satReportLun(tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
     }
     else
     {
        return satPacket(tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
     }
  }
  else /* pSatDevData->satDeviceType != SATA_ATAPI_DEVICE */
  {
    /* Parse CDB */
    switch(scsiCmnd->cdb[0])
    {
      case SCSIOPC_READ_6:
        retVal = satRead6( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_10:
        retVal = satRead10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_12:
        TI_DBG5(("satIOStart: SCSIOPC_READ_12\n"));
        retVal = satRead12( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_16:
        retVal = satRead16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_6:
        retVal = satWrite6( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_10:
        retVal = satWrite10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_12:
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_12 \n"));
        retVal = satWrite12( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_16:
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_16\n"));
        retVal = satWrite16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_VERIFY_10:
        retVal = satVerify10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_VERIFY_12:
        TI_DBG5(("satIOStart: SCSIOPC_VERIFY_12\n"));
        retVal = satVerify12( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_VERIFY_16:
        TI_DBG5(("satIOStart: SCSIOPC_VERIFY_16\n"));
        retVal = satVerify16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_TEST_UNIT_READY:
        retVal = satTestUnitReady( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_INQUIRY:
        retVal = satInquiry( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_REQUEST_SENSE:
        retVal = satRequestSense( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_MODE_SENSE_6:
        retVal = satModeSense6( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_MODE_SENSE_10:
        retVal = satModeSense10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_CAPACITY_10:
        retVal = satReadCapacity10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_CAPACITY_16:
        retVal = satReadCapacity16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_REPORT_LUN:
        retVal = satReportLun( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_FORMAT_UNIT:
        TI_DBG5(("satIOStart: SCSIOPC_FORMAT_UNIT\n"));
        retVal = satFormatUnit( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_SEND_DIAGNOSTIC: /* Table 28, p40 */
        TI_DBG5(("satIOStart: SCSIOPC_SEND_DIAGNOSTIC\n"));
        retVal = satSendDiagnostic( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_START_STOP_UNIT:
        TI_DBG5(("satIOStart: SCSIOPC_START_STOP_UNIT\n"));
        retVal = satStartStopUnit( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_SAME_10: /* sector and LBA; SAT p64 case 3 accessing payload and very inefficient now */
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_SAME_10\n"));
        retVal = satWriteSame10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_SAME_16: /* no support due to transfer length(sector count) */
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_SAME_16\n"));
        retVal = satWriteSame16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_LOG_SENSE: /* SCT and log parameter(informational exceptions) */
        TI_DBG5(("satIOStart: SCSIOPC_LOG_SENSE\n"));
        retVal = satLogSense( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_MODE_SELECT_6: /*mode layout and AlloLen check */
        TI_DBG5(("satIOStart: SCSIOPC_MODE_SELECT_6\n"));
        retVal = satModeSelect6( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_MODE_SELECT_10: /* mode layout and AlloLen check and sharing CB with satModeSelect6*/
        TI_DBG5(("satIOStart: SCSIOPC_MODE_SELECT_10\n"));
        retVal = satModeSelect10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_SYNCHRONIZE_CACHE_10: /* on error what to return, sharing CB with satSynchronizeCache16 */
        TI_DBG5(("satIOStart: SCSIOPC_SYNCHRONIZE_CACHE_10\n"));
        retVal = satSynchronizeCache10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_SYNCHRONIZE_CACHE_16:/* on error what to return, sharing CB with satSynchronizeCache16 */
        TI_DBG5(("satIOStart: SCSIOPC_SYNCHRONIZE_CACHE_16\n"));
        retVal = satSynchronizeCache16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_AND_VERIFY_10: /* single write and multiple writes */
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_AND_VERIFY_10\n"));
        retVal = satWriteAndVerify10( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_AND_VERIFY_12:
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_AND_VERIFY_12\n"));
        retVal = satWriteAndVerify12( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_AND_VERIFY_16:
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_AND_VERIFY_16\n"));
        retVal = satWriteAndVerify16( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_MEDIA_SERIAL_NUMBER:
        TI_DBG5(("satIOStart: SCSIOPC_READ_MEDIA_SERIAL_NUMBER\n"));
        retVal = satReadMediaSerialNumber( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_READ_BUFFER:
        TI_DBG5(("satIOStart: SCSIOPC_READ_BUFFER\n"));
        retVal = satReadBuffer( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_WRITE_BUFFER:
        TI_DBG5(("satIOStart: SCSIOPC_WRITE_BUFFER\n"));
        retVal = satWriteBuffer( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      case SCSIOPC_REASSIGN_BLOCKS:
        TI_DBG5(("satIOStart: SCSIOPC_REASSIGN_BLOCKS\n"));
        retVal = satReassignBlocks( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext);
        break;

      default:
        /* Not implemented SCSI cmd, set up error response */
        TI_DBG1(("satIOStart: unsupported SCSI cdb[0]=0x%x tiIORequest=%p tiDeviceHandle=%p\n",
                   scsiCmnd->cdb[0], tiIORequest, tiDeviceHandle));

        satSetSensePayload( pSense,
                            SCSI_SNSKEY_ILLEGAL_REQUEST,
                            0,
                            SCSI_SNSCODE_INVALID_COMMAND,
                            satIOContext);

        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOSuccess,
                                  SCSI_STAT_CHECK_CONDITION,
                                  satIOContext->pTiSenseData,
                                  satIOContext->interruptContext );

        retVal = tiSuccess;
        break;

    }  /* end switch  */
  }

  if (retVal == tiBusy)
  {
#ifdef  TD_DEBUG_ENABLE
    oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
#endif
    TI_DBG1(("satIOStart: BUSY did %d\n", oneDeviceData->id));
    TI_DBG3(("satIOStart: LL is busy or target queue is full\n"));
    TI_DBG3(("satIOStart: device %p satPendingIO %d satNCQMaxIO %d\n",pSatDevData, pSatDevData->satPendingIO, pSatDevData->satNCQMaxIO ));
    TI_DBG3(("satIOStart: device %p satPendingNCQIO %d satPendingNONNCQIO %d\n",pSatDevData, pSatDevData->satPendingNCQIO, pSatDevData->satPendingNONNCQIO));
    pSatIntIo = satIOContext->satIntIoContext;

    /* internal structure free */
    satFreeIntIoResource( tiRoot,
                          pSatDevData,
                          pSatIntIo);
  }

  ext:
  return retVal;
}

/*****************************************************************************/
/*! \brief Setup up the SCSI Sense response.
 *
 *  This function is used to setup up the Sense Data payload for
 *     CHECK CONDITION status.
 *
 *  \param pSense:      Pointer to the scsiRspSense_t sense data structure.
 *  \param SnsKey:      SCSI Sense Key.
 *  \param SnsInfo:     SCSI Sense Info.
 *  \param SnsCode:     SCSI Sense Code.
 *
 *  \return None
 */
/*****************************************************************************/
void satSetSensePayload( scsiRspSense_t   *pSense,
                         bit8             SnsKey,
                         bit32            SnsInfo,
                         bit16            SnsCode,
                         satIOContext_t   *satIOContext
                         )
{
  /* for fixed format sense data, SPC-4, p37 */
  bit32 i;
  bit32 senseLength;

  TI_DBG5(("satSetSensePayload: start\n"));

  senseLength = sizeof(scsiRspSense_t);

  /* zero out the data area */
  for (i=0;i< senseLength;i++)
  {
    ((bit8*)pSense)[i] = 0;
  }

  /*
   * SCSI Sense Data part of response data
   */
  pSense->snsRespCode  = 0x70;    /*  0xC0 == vendor specific */
                                  /*  0x70 == standard current error */
  pSense->senseKey     = SnsKey;
  /*
   * Put sense info in scsi order format
   */
  pSense->info[0]      = (bit8)((SnsInfo >> 24) & 0xff);
  pSense->info[1]      = (bit8)((SnsInfo >> 16) & 0xff);
  pSense->info[2]      = (bit8)((SnsInfo >> 8) & 0xff);
  pSense->info[3]      = (bit8)((SnsInfo) & 0xff);
  pSense->addSenseLen  = 11;          /* fixed size of sense data = 18 */
  pSense->addSenseCode = (bit8)((SnsCode >> 8) & 0xFF);
  pSense->senseQual    = (bit8)(SnsCode & 0xFF);
  /*
   * Set pointer in scsi status
   */
  switch(SnsKey)
  {
    /*
     * set illegal request sense key specific error in cdb,
no bit pointer */ case SCSI_SNSKEY_ILLEGAL_REQUEST: pSense->skeySpecific[0] = 0xC8; break; default: break; } /* setting sense data length */ if (satIOContext != agNULL) { satIOContext->pTiSenseData->senseLen = 18; } else { TI_DBG1(("satSetSensePayload: satIOContext is NULL\n")); } } /*****************************************************************************/ /*! \brief Setup up the SCSI Sense response. * * This function is used to setup up the Sense Data payload for * CHECK CONDITION status. * * \param pSense: Pointer to the scsiRspSense_t sense data structure. * \param SnsKey: SCSI Sense Key. * \param SnsInfo: SCSI Sense Info. * \param SnsCode: SCSI Sense Code. * * \return: None */ /*****************************************************************************/ void satSetDeferredSensePayload( scsiRspSense_t *pSense, bit8 SnsKey, bit32 SnsInfo, bit16 SnsCode, satIOContext_t *satIOContext ) { /* for fixed format sense data, SPC-4, p37 */ bit32 i; bit32 senseLength; senseLength = sizeof(scsiRspSense_t); /* zero out the data area */ for (i=0;i< senseLength;i++) { ((bit8*)pSense)[i] = 0; } /* * SCSI Sense Data part of response data */ pSense->snsRespCode = 0x71; /* 0xC0 == vendor specific */ /* 0x70 == standard current error */ pSense->senseKey = SnsKey; /* * Put sense info in scsi order format */ pSense->info[0] = (bit8)((SnsInfo >> 24) & 0xff); pSense->info[1] = (bit8)((SnsInfo >> 16) & 0xff); pSense->info[2] = (bit8)((SnsInfo >> 8) & 0xff); pSense->info[3] = (bit8)((SnsInfo) & 0xff); pSense->addSenseLen = 11; /* fixed size of sense data = 18 */ pSense->addSenseCode = (bit8)((SnsCode >> 8) & 0xFF); pSense->senseQual = (bit8)(SnsCode & 0xFF); /* * Set pointer in scsi status */ switch(SnsKey) { /* * set illegal request sense key specific error in cdb, no bit pointer */ case SCSI_SNSKEY_ILLEGAL_REQUEST: pSense->skeySpecific[0] = 0xC8; break; default: break; } /* setting sense data length */ if (satIOContext != agNULL) { satIOContext->pTiSenseData->senseLen = 18; } 
else { TI_DBG1(("satSetDeferredSensePayload: satIOContext is NULL\n")); } } /*****************************************************************************/ /*! \brief SAT implementation for ATAPI Packet Command. * * SAT implementation for ATAPI Packet and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satPacket( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_D2H_PKT; satDeviceData_t *pSatDevData; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG3(("satPacket: start, SCSI CDB is 0x%X %X %X %X %X %X %X %X %X %X %X %X\n", scsiCmnd->cdb[0],scsiCmnd->cdb[1],scsiCmnd->cdb[2],scsiCmnd->cdb[3], scsiCmnd->cdb[4],scsiCmnd->cdb[5],scsiCmnd->cdb[6],scsiCmnd->cdb[7], scsiCmnd->cdb[8],scsiCmnd->cdb[9],scsiCmnd->cdb[10],scsiCmnd->cdb[11])); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set 1*/ fis->h.command = SAT_PACKET; /* 0xA0 */ if (pSatDevData->satDMADIRSupport) /* DMADIR enabled*/ { fis->h.features = (tiScsiRequest->dataDirection == tiDirectionIn)? 
0x04 : 0; /* 1 for D2H, 0 for H2D */ } else { fis->h.features = 0; /* FIS reserve */ } /* Byte count low and byte count high */ if ( scsiCmnd->expDataLength > 0xFFFF ) { fis->d.lbaMid = 0xFF; /* FIS LBA (7 :0 ) */ fis->d.lbaHigh = 0xFF; /* FIS LBA (15:8 ) */ } else { fis->d.lbaMid = (bit8)scsiCmnd->expDataLength; /* FIS LBA (7 :0 ) */ fis->d.lbaHigh = (bit8)(scsiCmnd->expDataLength>>8); /* FIS LBA (15:8 ) */ } fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.device = 0; /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; satIOContext->ATACmd = SAT_PACKET; if (tiScsiRequest->dataDirection == tiDirectionIn) { agRequestType = AGSA_SATA_PROTOCOL_D2H_PKT; } else { agRequestType = AGSA_SATA_PROTOCOL_H2D_PKT; } if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /*DMA transfer mode*/ fis->h.features |= 0x01; } else { /*PIO transfer mode*/ fis->h.features |= 0x0; } satIOContext->satCompleteCB = &satPacketCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satPacket: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for satSetFeatures. * * This function creates SetFeatures fis and sends the request to LL layer * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. 
* \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satSetFeatures( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext, bit8 bIsDMAMode ) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; fis = satIOContext->pFis; TI_DBG3(("satSetFeatures: start\n")); /* * Send the Set Features command. */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_SET_FEATURES; /* 0xEF */ fis->h.features = 0x03; /* set transfer mode */ fis->d.lbaLow = 0; fis->d.lbaMid = 0; fis->d.lbaHigh = 0; fis->d.device = 0; fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ if (bIsDMAMode) { fis->d.sectorCount = 0x45; /*satIOContext->satCompleteCB = &satSetFeaturesDMACB;*/ } else { fis->d.sectorCount = 0x0C; /*satIOContext->satCompleteCB = &satSetFeaturesPIOCB;*/ } satIOContext->satCompleteCB = &satSetFeaturesCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satSetFeatures: return\n")); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI REQUEST SENSE to ATAPI device. * * SAT implementation for SCSI REQUEST SENSE. 
* * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satRequestSenseForATAPI( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_D2H_PKT; satDeviceData_t *pSatDevData; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; scsiCmnd->cdb[0] = SCSIOPC_REQUEST_SENSE; scsiCmnd->cdb[1] = 0; scsiCmnd->cdb[2] = 0; scsiCmnd->cdb[3] = 0; scsiCmnd->cdb[4] = SENSE_DATA_LENGTH; scsiCmnd->cdb[5] = 0; TI_DBG3(("satRequestSenseForATAPI: start, SCSI CDB is 0x%X %X %X %X %X %X %X %X %X %X %X %X\n", scsiCmnd->cdb[0],scsiCmnd->cdb[1],scsiCmnd->cdb[2],scsiCmnd->cdb[3], scsiCmnd->cdb[4],scsiCmnd->cdb[5],scsiCmnd->cdb[6],scsiCmnd->cdb[7], scsiCmnd->cdb[8],scsiCmnd->cdb[9],scsiCmnd->cdb[10],scsiCmnd->cdb[11])); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set 1*/ fis->h.command = SAT_PACKET; /* 0xA0 */ if (pSatDevData->satDMADIRSupport) /* DMADIR enabled*/ { fis->h.features = (tiScsiRequest->dataDirection == tiDirectionIn)? 
0x04 : 0; /* 1 for D2H, 0 for H2D */ } else { fis->h.features = 0; /* FIS reserve */ } fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0x20; /* FIS LBA (23:16) */ fis->d.device = 0; /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = (bit32)(scsiCmnd->cdb[0]|(scsiCmnd->cdb[1]<<8)|(scsiCmnd->cdb[2]<<16)|(scsiCmnd->cdb[3]<<24)); satIOContext->ATACmd = SAT_PACKET; agRequestType = AGSA_SATA_PROTOCOL_D2H_PKT; //if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { fis->h.features |= 0x01; } else { fis->h.features |= 0x0; } } satIOContext->satCompleteCB = &satRequestSenseForATAPICB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satRequestSenseForATAPI: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for satDeviceReset. * * This function creates DEVICE RESET fis and sends the request to LL layer * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/
/*****************************************************************************/
GLOBAL bit32 satDeviceReset(
                  tiRoot_t                  *tiRoot,
                  tiIORequest_t             *tiIORequest,
                  tiDeviceHandle_t          *tiDeviceHandle,
                  tiScsiInitiatorRequest_t  *tiScsiRequest,
                  satIOContext_t            *satIOContext
                  )
{
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t  *fis;

  fis = satIOContext->pFis;
  TI_DBG3(("satDeviceReset: start\n"));

  /*
   * Build a DEVICE RESET register FIS.  All payload registers are zeroed;
   * only the command opcode matters.
   * NOTE(review): the original comment annotated SAT_DEVICE_RESET as 0x90,
   * which is the EXECUTE DEVICE DIAGNOSTIC opcode (apparent copy-paste);
   * the real value comes from the SAT_DEVICE_RESET definition.
   */
  fis->h.fisType        = 0x27;              /* Reg host to device */
  fis->h.c_pmPort       = 0x80;              /* C Bit is set */
  fis->h.command        = SAT_DEVICE_RESET;
  fis->h.features       = 0;
  fis->d.lbaLow         = 0;
  fis->d.lbaMid         = 0;
  fis->d.lbaHigh        = 0;
  fis->d.device         = 0;
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0;                 /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_DEV_RESET;

  /* Completion callback for this reset request. */
  satIOContext->satCompleteCB = &satDeviceResetCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;     /* Save it */
  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  TI_DBG3(("satDeviceReset: return\n"));
  return status;
}
/*****************************************************************************/
/*! \brief SAT implementation for saExecuteDeviceDiagnostic.
 *
 * This function creates Execute Device Diagnostic fis and sends the request to LL layer
 *
 * \param tiRoot:           Pointer to TISA initiator driver/port instance.
 * \param tiIORequest:      Pointer to TISA I/O request context for this I/O.
 * \param tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 * \param tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 * \param satIOContext_t:   Pointer to the SAT IO Context
 *
 * \return If command is started successfully
 *   - \e tiSuccess: I/O request successfully initiated.
* - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satExecuteDeviceDiagnostic( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext ) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; fis = satIOContext->pFis; TI_DBG3(("satExecuteDeviceDiagnostic: start\n")); /* * Send the Execute Device Diagnostic command. */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_EXECUTE_DEVICE_DIAGNOSTIC; /* 0x90 */ fis->h.features = 0; fis->d.lbaLow = 0; fis->d.lbaMid = 0; fis->d.lbaHigh = 0; fis->d.device = 0; fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satExecuteDeviceDiagnosticCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satExecuteDeviceDiagnostic: return\n")); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI READ10. * * SAT implementation for SCSI READ10 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. 
* \param satIOContext_t: Pointer to the SAT IO Context
 *
 * \return If command is started successfully
 *   - \e tiSuccess:     I/O request successfully initiated.
 *   - \e tiBusy:        No resources available, try again later.
 *   - \e tiIONoDevice:  Invalid device handle.
 *   - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satRead10(
                  tiRoot_t                  *tiRoot,
                  tiIORequest_t             *tiIORequest,
                  tiDeviceHandle_t          *tiDeviceHandle,
                  tiScsiInitiatorRequest_t  *tiScsiRequest,
                  satIOContext_t            *satIOContext)
{
  /*
   * Translates SCSI READ(10) into one of five ATA read commands depending on
   * the device's capabilities (SAT Table 34, 9.1, p 46):
   *   case 1: READ SECTORS       (28-bit, PIO)
   *   case 2: READ DMA           (28-bit, DMA)
   *   case 3: READ DMA EXT       (48-bit, DMA)
   *   case 4: READ SECTORS EXT   (48-bit, PIO)
   *   case 5: READ FPDMA QUEUED  (48-bit, NCQ)
   * Note the cases are written as successive if-blocks that OVERWRITE the FIS
   * built by an earlier case (e.g. case 3/4 replaces case 1/2 when 48-bit is
   * supported) -- the statement order is load-bearing.
   * When the transfer length exceeds one ATA command's limit, the I/O is
   * chained (see the LoopNum handling at the bottom).
   */
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_DMA_READ;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;
  bit8                      LBA[4];
  bit8                      TL[4];
  bit32                     rangeChk = agFALSE; /* lba and tl range check */

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satRead10: start\n"));
  TI_DBG5(("satRead10: pSatDevData=%p\n", pSatDevData));
  //  tdhexdump("satRead10", (bit8 *)scsiCmnd->cdb, 10);

  /* checking FUA_NV: not supported -> CHECK CONDITION / INVALID FIELD IN CDB */
  if (scsiCmnd->cdb[1] & SCSI_FUA_NV_MASK)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satRead10: return FUA_NV\n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1: neither is supported -> CHECK CONDITION */
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satRead10: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  LBA[0] = scsiCmnd->cdb[2];  /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];  /* LSB */
  TL[0]  = 0;
  TL[1]  = 0;
  TL[2]  = scsiCmnd->cdb[7];  /* MSB */
  TL[3]  = scsiCmnd->cdb[8];  /* LSB */

  /* agTRUE when LBA + TL overflows the 28-bit addressable range */
  rangeChk = satAddNComparebit32(LBA, TL);

  /* cbd10; computing LBA and transfer length */
  lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2))
      + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];
  tl  = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  TI_DBG5(("satRead10: lba %d functioned lba %d\n", lba, satComputeCDB10LBA(satIOContext)));
  TI_DBG5(("satRead10: lba 0x%x functioned lba 0x%x\n", lba, satComputeCDB10LBA(satIOContext)));
  TI_DBG5(("satRead10: tl %d functioned tl %d\n", tl, satComputeCDB10TL(satIOContext)));

  /* Table 34, 9.1, p 46 */
  /* note: As of 2/10/2006, no support for DMA QUEUED */

  /*
    Table 34, 9.1, p 46, b
    When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1),
    return check condition
  */
  if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE )
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      TI_DBG1(("satRead10: return LBA out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }

    if (rangeChk) //  if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satRead10: return LBA+TL out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
  }

  /* case 1 and 2: 28-bit commands, only when LBA+TL fits in 28 bits */
  if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 2 */
      /* READ DMA*/
      /* in case that we can't fit the transfer length,
         we need to make it fit by sending multiple ATA cmnds */
      TI_DBG5(("satRead10: case 2\n"));

      fis->h.fisType        = 0x27;                  /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                  /* C Bit is set */
      fis->h.command        = SAT_READ_DMA;          /* 0xC8 */
      fis->h.features       = 0;                     /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];      /* FIS LBA (23:16) */
      fis->d.device         =
        (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
                                                     /* FIS LBA (27:24) and FIS LBA mode  */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[8];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                     /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_READ;
      satIOContext->ATACmd = SAT_READ_DMA;
    }
    else
    {
      /* case 1 */
      /* READ MULTIPLE or READ SECTOR(S) */
      /* READ SECTORS for easier implemetation */
      /* in case that we can't fit the transfer length,
         we need to make it fit by sending multiple ATA cmnds */
      TI_DBG5(("satRead10: case 1\n"));

      fis->h.fisType        = 0x27;                  /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                  /* C Bit is set */
      fis->h.command        = SAT_READ_SECTORS;      /* 0x20 */
      fis->h.features       = 0;                     /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];      /* FIS LBA (23:16) */
      fis->d.device         =
        (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
                                                     /* FIS LBA (27:24) and FIS LBA mode  */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[8];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                     /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
      satIOContext->ATACmd = SAT_READ_SECTORS;
    }
  }

  /* case 3 and 4: 48-bit commands; overwrites any FIS built by case 1/2 */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 3 */
      /* READ DMA EXT */
      TI_DBG5(("satRead10: case 3\n"));

      fis->h.fisType        = 0x27;                  /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                  /* C Bit is set */
      fis->h.command        = SAT_READ_DMA_EXT;      /* 0x25 */
      fis->h.features       = 0;                     /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];      /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                  /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];      /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                     /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                     /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                     /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[8];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[7];      /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                     /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_READ;
      satIOContext->ATACmd = SAT_READ_DMA_EXT;
    }
    else
    {
      /* case 4 */
      /* READ MULTIPLE EXT or READ SECTOR(S) EXT or READ VERIFY SECTOR(S) EXT*/
      /* READ SECTORS EXT for easier implemetation */
      TI_DBG5(("satRead10: case 4\n"));

      fis->h.fisType        = 0x27;                  /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                  /* C Bit is set */

      /* Check FUA bit */
      if (scsiCmnd->cdb[1] & SCSI_READ10_FUA_MASK)
      {
        /* for now, no support for FUA */
        satSetSensePayload( pSense,
                            SCSI_SNSKEY_ILLEGAL_REQUEST,
                            0,
                            SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                            satIOContext);

        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOSuccess,
                                  SCSI_STAT_CHECK_CONDITION,
                                  satIOContext->pTiSenseData,
                                  satIOContext->interruptContext );
        return tiSuccess;
      }

      fis->h.command        = SAT_READ_SECTORS_EXT;  /* 0x24 */
      fis->h.features       = 0;                     /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];      /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                  /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];      /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                     /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                     /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                     /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[8];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[7];      /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                     /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
      satIOContext->ATACmd = SAT_READ_SECTORS_EXT;
    }
  }

  /* case 5: NCQ; overwrites any FIS built by the earlier cases */
  if (pSatDevData->satNCQ == agTRUE)
  {
    /* READ FPDMA QUEUED */
    if (pSatDevData->sat48BitSupport != agTRUE)
    {
      TI_DBG5(("satRead10: case 5 !!! error NCQ but 28 bit address support \n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    TI_DBG6(("satRead10: case 5\n"));

    /* Support 48-bit FPDMA addressing, use READ FPDMA QUEUE command */
    /* NCQ carries the sector count in the features registers */
    fis->h.fisType        = 0x27;                    /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                    /* C Bit is set */
    fis->h.command        = SAT_READ_FPDMA_QUEUED;   /* 0x60 */
    fis->h.features       = scsiCmnd->cdb[8];        /* FIS sector count (7:0) */
    fis->d.lbaLow         = scsiCmnd->cdb[5];        /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];        /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];        /* FIS LBA (23:16) */

    /* Check FUA bit */
    if (scsiCmnd->cdb[1] & SCSI_READ10_FUA_MASK)
      fis->d.device       = 0xC0;                    /* FIS FUA set */
    else
      fis->d.device       = 0x40;                    /* FIS FUA clear */

    fis->d.lbaLowExp      = scsiCmnd->cdb[2];        /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                       /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                       /* FIS LBA (47:40) */
    fis->d.featuresExp    = scsiCmnd->cdb[7];        /* FIS sector count (15:8) */
    fis->d.sectorCount    = 0;                       /* Tag (7:3) set by LL layer */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                       /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_FPDMA_READ;
    satIOContext->ATACmd = SAT_READ_FPDMA_QUEUED;
  }

  //  tdhexdump("satRead10 final fis", (bit8 *)fis, sizeof(agsaFisRegHostToDevice_t));

  /* saves the current LBA and orginal TL */
  satIOContext->currentLBA = lba;
  satIOContext->OrgTL = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_READ_SECTORS || fis->h.command == SAT_READ_DMA)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_READ_SECTORS_EXT || fis->h.command == SAT_READ_DMA_EXT)
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    /* SAT_READ_FPDMA_QUEUED */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }

  satIOContext->LoopNum = LoopNum;

  /* Initialize CB for SATA completion. */
  if (LoopNum == 1)
  {
    TI_DBG5(("satRead10: NON CHAINED data\n"));
    satIOContext->satCompleteCB = &satNonChainedDataIOCB;
  }
  else
  {
    TI_DBG1(("satRead10: CHAINED data\n"));
    /* re-setting tl: first command of the chain transfers the maximum;
       satRead_1 issues the remaining commands */
    if (fis->h.command == SAT_READ_SECTORS || fis->h.command == SAT_READ_DMA)
    {
      fis->d.sectorCount = 0xFF;
    }
    else if (fis->h.command == SAT_READ_SECTORS_EXT || fis->h.command == SAT_READ_DMA_EXT)
    {
      /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
      fis->d.sectorCount = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      /* SAT_READ_FPDMA_QUEUED */
      fis->h.features = 0xFF;
      fis->d.featuresExp = 0xFF;
    }

    /* chained data */
    satIOContext->satCompleteCB = &satChainedDataIOCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */
  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  TI_DBG5(("satRead10: return\n"));
  return (status);
}
/*****************************************************************************/
/*!
\brief SAT implementation for SCSI satRead_1. * * SAT implementation for SCSI satRead_1 * Sub function of satRead10 * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ /* * as a part of loop for read10 */ GLOBAL bit32 satRead_1( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* Assumption: error check on lba and tl has been done in satRead*() lba = lba + tl; */ bit32 status; satIOContext_t *satOrgIOContext = agNULL; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; bit32 lba = 0; bit32 DenomTL = 0xFF; bit32 Remainder = 0; bit8 LBA[4]; /* 0 MSB, 3 LSB */ TI_DBG2(("satRead_1: start\n")); fis = satIOContext->pFis; satOrgIOContext = satIOContext->satOrgIOContext; scsiCmnd = satOrgIOContext->pScsiCmnd; osti_memset(LBA,0, sizeof(LBA)); switch (satOrgIOContext->ATACmd) { case SAT_READ_DMA: DenomTL = 0xFF; break; case SAT_READ_SECTORS: DenomTL = 0xFF; break; case SAT_READ_DMA_EXT: DenomTL = 0xFFFF; break; case SAT_READ_SECTORS_EXT: DenomTL = 0xFFFF; break; case SAT_READ_FPDMA_QUEUED: DenomTL = 0xFFFF; break; default: TI_DBG1(("satRead_1: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } Remainder = satOrgIOContext->OrgTL % DenomTL; satOrgIOContext->currentLBA = satOrgIOContext->currentLBA + 
DenomTL; lba = satOrgIOContext->currentLBA; LBA[0] = (bit8)((lba & 0xF000) >> (8 * 3)); LBA[1] = (bit8)((lba & 0xF00) >> (8 * 2)); LBA[2] = (bit8)((lba & 0xF0) >> 8); LBA[3] = (bit8)(lba & 0xF); switch (satOrgIOContext->ATACmd) { case SAT_READ_DMA: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA; /* 0xC8 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; break; case SAT_READ_SECTORS: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_SECTORS; /* 0x20 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; break; case SAT_READ_DMA_EXT: 
fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA_EXT; /* 0x25 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; break; case SAT_READ_SECTORS_EXT: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_SECTORS_EXT; /* 0x24 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit 
clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; break; case SAT_READ_FPDMA_QUEUED: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_FPDMA_QUEUED; /* 0x60 */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_READ10_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->h.features = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.featuresExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->h.features = 0xFF; /* FIS sector count (7:0) */ fis->d.featuresExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_READ; break; default: TI_DBG1(("satRead_1: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } /* Initialize CB for SATA completion. */ /* chained data */ satIOContext->satCompleteCB = &satChainedDataIOCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satRead_1: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI READ12. * * SAT implementation for SCSI READ12 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. 
* \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satRead12( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; bit32 LoopNum = 1; bit8 LBA[4]; bit8 TL[4]; bit32 rangeChk = agFALSE; /* lba and tl range check */ pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satRead12: start\n")); /* checking FUA_NV */ if (scsiCmnd->cdb[1] & SCSI_FUA_NV_MASK) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satRead12: return FUA_NV\n")); return tiSuccess; } /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[11] & SCSI_NACA_MASK) || (scsiCmnd->cdb[11] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, 
satIOContext->interruptContext ); TI_DBG2(("satRead12: return control\n")); return tiSuccess; } osti_memset(LBA, 0, sizeof(LBA)); osti_memset(TL, 0, sizeof(TL)); /* do not use memcpy due to indexing in LBA and TL */ LBA[0] = scsiCmnd->cdb[2]; /* MSB */ LBA[1] = scsiCmnd->cdb[3]; LBA[2] = scsiCmnd->cdb[4]; LBA[3] = scsiCmnd->cdb[5]; /* LSB */ TL[0] = scsiCmnd->cdb[6]; /* MSB */ TL[1] = scsiCmnd->cdb[7]; TL[2] = scsiCmnd->cdb[8]; TL[3] = scsiCmnd->cdb[9]; /* LSB */ rangeChk = satAddNComparebit32(LBA, TL); lba = satComputeCDB12LBA(satIOContext); tl = satComputeCDB12TL(satIOContext); /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (lba > SAT_TR_LBA_LIMIT - 1) { TI_DBG1(("satRead12: return LBA out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT) { TI_DBG1(("satRead12: return LBA+TL out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* case 1 and 2 */ if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* READ DMA*/ /* in case that we can't fit the transfer length, we need to make it fit by sending multiple ATA cmnds */ TI_DBG5(("satRead12: case 2\n")); 
fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA; /* 0xC8 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satIOContext->ATACmd = SAT_READ_DMA; } else { /* case 1 */ /* READ MULTIPLE or READ SECTOR(S) */ /* READ SECTORS for easier implemetation */ /* can't fit the transfer length but need to make it fit by sending multiple*/ TI_DBG5(("satRead12: case 1\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_SECTORS; /* 0x20 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; satIOContext->ATACmd = SAT_READ_SECTORS; } } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* READ 
DMA EXT */ TI_DBG5(("satRead12: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA_EXT; /* 0x25 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[8]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satIOContext->ATACmd = SAT_READ_DMA_EXT; } else { /* case 4 */ /* READ MULTIPLE EXT or READ SECTOR(S) EXT or READ VERIFY SECTOR(S) EXT*/ /* READ SECTORS EXT for easier implemetation */ TI_DBG5(("satRead12: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_READ12_FUA_MASK) { /* for now, no support for FUA */ satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } fis->h.command = SAT_READ_SECTORS_EXT; /* 0x24 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ 
fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[8]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; satIOContext->ATACmd = SAT_READ_SECTORS_EXT; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* READ FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satRead12: case 5 !!! error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG6(("satRead12: case 5\n")); /* Support 48-bit FPDMA addressing, use READ FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_FPDMA_QUEUED; /* 0x60 */ fis->h.features = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_READ12_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = scsiCmnd->cdb[8]; /* FIS sector count (15:8) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_READ; satIOContext->ATACmd = SAT_READ_FPDMA_QUEUED; } /* 
saves the current LBA and orginal TL */ satIOContext->currentLBA = lba; satIOContext->OrgTL = tl; /* computing number of loop and remainder for tl 0xFF in case not ext 0xFFFF in case EXT */ if (fis->h.command == SAT_READ_SECTORS || fis->h.command == SAT_READ_DMA) { LoopNum = satComputeLoopNum(tl, 0xFF); } else if (fis->h.command == SAT_READ_SECTORS_EXT || fis->h.command == SAT_READ_DMA_EXT) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } else { /* SAT_READ_FPDMA_QUEUEDK */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } satIOContext->LoopNum = LoopNum; if (LoopNum == 1) { TI_DBG5(("satRead12: NON CHAINED data\n")); satIOContext->satCompleteCB = &satNonChainedDataIOCB; } else { TI_DBG1(("satRead12: CHAINED data\n")); /* re-setting tl */ if (fis->h.command == SAT_READ_SECTORS || fis->h.command == SAT_READ_DMA) { fis->d.sectorCount = 0xFF; } else if (fis->h.command == SAT_READ_SECTORS_EXT || fis->h.command == SAT_READ_DMA_EXT) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ fis->d.sectorCount = 0xFF; fis->d.sectorCountExp = 0xFF; } else { /* SAT_READ_FPDMA_QUEUED */ fis->h.features = 0xFF; fis->d.featuresExp = 0xFF; } /* chained data */ satIOContext->satCompleteCB = &satChainedDataIOCB; } /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satRead12: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI READ16. * * SAT implementation for SCSI READ16 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. 
* \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satRead16( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; bit32 LoopNum = 1; bit8 LBA[8]; bit8 TL[8]; bit32 rangeChk = agFALSE; /* lba and tl range check */ bit32 limitChk = agFALSE; /* lba and tl range check */ pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satRead16: start\n")); /* checking FUA_NV */ if (scsiCmnd->cdb[1] & SCSI_FUA_NV_MASK) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satRead16: return FUA_NV\n")); return tiSuccess; } /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[15] & SCSI_NACA_MASK) || (scsiCmnd->cdb[15] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satRead16: return control\n")); return tiSuccess; } osti_memset(LBA, 0, sizeof(LBA)); osti_memset(TL, 0, sizeof(TL)); /* do not use 
memcpy due to indexing in LBA and TL */ LBA[0] = scsiCmnd->cdb[2]; /* MSB */ LBA[1] = scsiCmnd->cdb[3]; LBA[2] = scsiCmnd->cdb[4]; LBA[3] = scsiCmnd->cdb[5]; LBA[4] = scsiCmnd->cdb[6]; LBA[5] = scsiCmnd->cdb[7]; LBA[6] = scsiCmnd->cdb[8]; LBA[7] = scsiCmnd->cdb[9]; /* LSB */ TL[0] = 0; TL[1] = 0; TL[2] = 0; TL[3] = 0; TL[4] = scsiCmnd->cdb[10]; /* MSB */ TL[5] = scsiCmnd->cdb[11]; TL[6] = scsiCmnd->cdb[12]; TL[7] = scsiCmnd->cdb[13]; /* LSB */ rangeChk = satAddNComparebit64(LBA, TL); limitChk = satCompareLBALimitbit(LBA); lba = satComputeCDB16LBA(satIOContext); tl = satComputeCDB16TL(satIOContext); /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (limitChk) { TI_DBG1(("satRead16: return LBA out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT) { TI_DBG1(("satRead16: return LBA+TL out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* case 1 and 2 */ if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* READ DMA*/ /* in case that we can't fit the transfer length, we need to make it fit by sending multiple ATA cmnds */ TI_DBG5(("satRead16: case 2\n")); 
fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA; /* 0xC8 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF)); /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satIOContext->ATACmd = SAT_READ_DMA; } else { /* case 1 */ /* READ MULTIPLE or READ SECTOR(S) */ /* READ SECTORS for easier implemetation */ /* can't fit the transfer length but need to make it fit by sending multiple*/ TI_DBG5(("satRead16: case 1\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_SECTORS; /* 0x20 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF)); /* FIS LBA (27:24) and FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; satIOContext->ATACmd = SAT_READ_SECTORS; } } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* READ 
DMA EXT */ TI_DBG5(("satRead16: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA_EXT; /* 0x25 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[6]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = scsiCmnd->cdb[5]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = scsiCmnd->cdb[4]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[12]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satIOContext->ATACmd = SAT_READ_DMA_EXT; } else { /* case 4 */ /* READ MULTIPLE EXT or READ SECTOR(S) EXT or READ VERIFY SECTOR(S) EXT*/ /* READ SECTORS EXT for easier implemetation */ TI_DBG5(("satRead16: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_READ16_FUA_MASK) { /* for now, no support for FUA */ satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } fis->h.command = SAT_READ_SECTORS_EXT; /* 0x24 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[6]; /* FIS LBA (31:24) */ fis->d.lbaMidExp 
= scsiCmnd->cdb[5]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = scsiCmnd->cdb[4]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[12]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; satIOContext->ATACmd = SAT_READ_SECTORS_EXT; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* READ FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satRead16: case 5 !!! error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG6(("satRead16: case 5\n")); /* Support 48-bit FPDMA addressing, use READ FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_FPDMA_QUEUED; /* 0x60 */ fis->h.features = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_READ16_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = scsiCmnd->cdb[6]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = scsiCmnd->cdb[5]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = scsiCmnd->cdb[4]; /* FIS LBA (47:40) */ fis->d.featuresExp = scsiCmnd->cdb[12]; /* FIS sector count (15:8) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; 
agRequestType = AGSA_SATA_PROTOCOL_FPDMA_READ; satIOContext->ATACmd = SAT_READ_FPDMA_QUEUED; } /* saves the current LBA and orginal TL */ satIOContext->currentLBA = lba; satIOContext->OrgTL = tl; /* computing number of loop and remainder for tl 0xFF in case not ext 0xFFFF in case EXT */ if (fis->h.command == SAT_READ_SECTORS || fis->h.command == SAT_READ_DMA) { LoopNum = satComputeLoopNum(tl, 0xFF); } else if (fis->h.command == SAT_READ_SECTORS_EXT || fis->h.command == SAT_READ_DMA_EXT) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } else { /* SAT_READ_FPDMA_QUEUEDK */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } satIOContext->LoopNum = LoopNum; if (LoopNum == 1) { TI_DBG5(("satRead16: NON CHAINED data\n")); satIOContext->satCompleteCB = &satNonChainedDataIOCB; } else { TI_DBG1(("satRead16: CHAINED data\n")); /* re-setting tl */ if (fis->h.command == SAT_READ_SECTORS || fis->h.command == SAT_READ_DMA) { fis->d.sectorCount = 0xFF; } else if (fis->h.command == SAT_READ_SECTORS_EXT || fis->h.command == SAT_READ_DMA_EXT) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ fis->d.sectorCount = 0xFF; fis->d.sectorCountExp = 0xFF; } else { /* SAT_READ_FPDMA_QUEUED */ fis->h.features = 0xFF; fis->d.featuresExp = 0xFF; } /* chained data */ satIOContext->satCompleteCB = &satChainedDataIOCB; } /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satRead16: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI READ6. * * SAT implementation for SCSI READ6 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. 
* \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satRead6( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit16 tl = 0; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satRead6: start\n")); /* no FUA checking since read6 */ /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG2(("satRead6: return control\n")); return tiSuccess; } /* cbd6; computing LBA and transfer length */ lba = (((scsiCmnd->cdb[1]) & 0x1f) << (8*2)) + (scsiCmnd->cdb[2] << 8) + scsiCmnd->cdb[3]; tl = scsiCmnd->cdb[4]; /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (lba > SAT_TR_LBA_LIMIT - 1) { satSetSensePayload( pSense, 
SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satRead6: return LBA out of range\n")); return tiSuccess; } } /* case 1 and 2 */ if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* READ DMA*/ TI_DBG5(("satRead6: case 2\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA; /* 0xC8 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (tl == 0) { /* temporary fix */ fis->d.sectorCount = 0xff; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = scsiCmnd->cdb[4]; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; } else { /* case 1 */ /* READ SECTORS for easier implemetation */ TI_DBG5(("satRead6: case 1\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_SECTORS; /* 0x20 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (tl == 0) { /* temporary fix */ fis->d.sectorCount = 0xff; /* FIS 
sector count (7:0) */ } else { fis->d.sectorCount = scsiCmnd->cdb[4]; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; } } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* READ DMA EXT only */ TI_DBG5(("satRead6: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_DMA_EXT; /* 0x25 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (tl == 0) { /* sector count is 256, 0x100*/ fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0x01; /* FIS sector count (15:8) */ } else { fis->d.sectorCount = scsiCmnd->cdb[4]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_READ; } else { /* case 4 */ /* READ SECTORS EXT for easier implemetation */ TI_DBG5(("satRead6: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_SECTORS_EXT; /* 0x24 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA 
mode set */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (tl == 0) { /* sector count is 256, 0x100*/ fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0x01; /* FIS sector count (15:8) */ } else { fis->d.sectorCount = scsiCmnd->cdb[4]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* READ FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { /* sanity check */ TI_DBG5(("satRead6: case 5 !!! error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG5(("satRead6: case 5\n")); /* Support 48-bit FPDMA addressing, use READ FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_FPDMA_QUEUED; /* 0x60 */ fis->d.lbaLow = scsiCmnd->cdb[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ if (tl == 0) { /* sector count is 256, 0x100*/ fis->h.features = 0; /* FIS sector count (7:0) */ fis->d.featuresExp = 0x01; /* FIS sector count (15:8) */ } else { fis->h.features = scsiCmnd->cdb[4]; /* FIS sector count (7:0) */ fis->d.featuresExp = 0; /* FIS sector count (15:8) */ 
} fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_READ; } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonChainedDataIOCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI WRITE16. * * SAT implementation for SCSI WRITE16 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/ /*****************************************************************************/ GLOBAL bit32 satWrite16( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; bit32 LoopNum = 1; bit8 LBA[8]; bit8 TL[8]; bit32 rangeChk = agFALSE; /* lba and tl range check */ bit32 limitChk = agFALSE; /* lba and tl range check */ pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satWrite16: start\n")); /* checking FUA_NV */ if (scsiCmnd->cdb[1] & SCSI_FUA_NV_MASK) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWrite16: return FUA_NV\n")); return tiSuccess; } /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[15] & SCSI_NACA_MASK) || (scsiCmnd->cdb[15] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWrite16: return control\n")); return tiSuccess; } osti_memset(LBA, 0, sizeof(LBA)); osti_memset(TL, 0, sizeof(TL)); /* do not use memcpy due to indexing in LBA and TL */ LBA[0] = scsiCmnd->cdb[2]; /* MSB */ LBA[1] = scsiCmnd->cdb[3]; LBA[2] = scsiCmnd->cdb[4]; LBA[3] = scsiCmnd->cdb[5]; LBA[4] = scsiCmnd->cdb[6]; LBA[5] = scsiCmnd->cdb[7]; LBA[6] = scsiCmnd->cdb[8]; LBA[7] = scsiCmnd->cdb[9]; /* LSB */ TL[0] = 
0; TL[1] = 0; TL[2] = 0; TL[3] = 0; TL[4] = scsiCmnd->cdb[10]; /* MSB */ TL[5] = scsiCmnd->cdb[11]; TL[6] = scsiCmnd->cdb[12]; TL[7] = scsiCmnd->cdb[13]; /* LSB */ rangeChk = satAddNComparebit64(LBA, TL); limitChk = satCompareLBALimitbit(LBA); lba = satComputeCDB16LBA(satIOContext); tl = satComputeCDB16TL(satIOContext); /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (limitChk) { TI_DBG1(("satWrite16: return LBA out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT) { TI_DBG1(("satWrite16: return LBA+TL out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* case 1 and 2 */ if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* WRITE DMA*/ /* In case that we can't fit the transfer length, we loop */ TI_DBG5(("satWrite16: case 2\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_DMA; /* 0xCA */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA 
(23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA; } else { /* case 1 */ /* WRITE MULTIPLE or WRITE SECTOR(S) */ /* WRITE SECTORS for easier implemetation */ /* In case that we can't fit the transfer length, we loop */ TI_DBG5(("satWrite16: case 1\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_SECTORS; /* 0x30 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS; } } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* WRITE DMA EXT or WRITE DMA FUA EXT */ TI_DBG5(("satWrite16: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x35 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* 
FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[6]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = scsiCmnd->cdb[5]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = scsiCmnd->cdb[4]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[12]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA_EXT; } else { /* case 4 */ /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */ /* WRITE SECTORS EXT for easier implemetation */ TI_DBG5(("satWrite16: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[6]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = scsiCmnd->cdb[5]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = scsiCmnd->cdb[4]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[12]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* WRITE FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satWrite16: case 5 !!! 
error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG6(("satWrite16: case 5\n")); /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ fis->h.features = scsiCmnd->cdb[13]; /* FIS sector count (7:0) */ fis->d.lbaLow = scsiCmnd->cdb[9]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[8]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[7]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE16_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = scsiCmnd->cdb[6]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = scsiCmnd->cdb[5]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = scsiCmnd->cdb[4]; /* FIS LBA (47:40) */ fis->d.featuresExp = scsiCmnd->cdb[12]; /* FIS sector count (15:8) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED; } satIOContext->currentLBA = lba; satIOContext->OrgTL = tl; /* computing number of loop and remainder for tl 0xFF in case not ext 0xFFFF in case EXT */ if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA) { LoopNum = satComputeLoopNum(tl, 0xFF); } else if (fis->h.command == SAT_WRITE_SECTORS_EXT || fis->h.command == SAT_WRITE_DMA_EXT || fis->h.command == SAT_WRITE_DMA_FUA_EXT ) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } else { /* SAT_WRITE_FPDMA_QUEUEDK 
*/ LoopNum = satComputeLoopNum(tl, 0xFFFF); } satIOContext->LoopNum = LoopNum; if (LoopNum == 1) { TI_DBG5(("satWrite16: NON CHAINED data\n")); /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonChainedDataIOCB; } else { TI_DBG1(("satWrite16: CHAINED data\n")); /* re-setting tl */ if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA) { fis->d.sectorCount = 0xFF; } else if (fis->h.command == SAT_WRITE_SECTORS_EXT || fis->h.command == SAT_WRITE_DMA_EXT || fis->h.command == SAT_WRITE_DMA_FUA_EXT ) { fis->d.sectorCount = 0xFF; fis->d.sectorCountExp = 0xFF; } else { /* SAT_WRITE_FPDMA_QUEUED */ fis->h.features = 0xFF; fis->d.featuresExp = 0xFF; } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satChainedDataIOCB; } /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI WRITE12. * * SAT implementation for SCSI WRITE12 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/
/*****************************************************************************/
GLOBAL bit32 satWrite12(
                        tiRoot_t                  *tiRoot,
                        tiIORequest_t             *tiIORequest,
                        tiDeviceHandle_t          *tiDeviceHandle,
                        tiScsiInitiatorRequest_t  *tiScsiRequest,
                        satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;       /* number of chained ATA commands for this I/O */
  bit8                      LBA[4];            /* CDB LBA bytes, big-endian: 0 MSB, 3 LSB */
  bit8                      TL[4];             /* CDB transfer-length bytes, big-endian */
  bit32                     rangeChk = agFALSE; /* lba and tl range check */

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satWrite12: start\n"));

  /* checking FUA_NV: not supported -> CHECK CONDITION / INVALID FIELD IN CDB */
  if (scsiCmnd->cdb[1] & SCSI_FUA_NV_MASK)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWrite12: return FUA_NV\n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 is not supported */
  if ( (scsiCmnd->cdb[11] & SCSI_NACA_MASK) || (scsiCmnd->cdb[11] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWrite12: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  LBA[0] = scsiCmnd->cdb[2];  /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];  /* LSB */

  TL[0] = scsiCmnd->cdb[6];   /* MSB */
  TL[1] = scsiCmnd->cdb[7];
  TL[2] = scsiCmnd->cdb[8];
  TL[3] = scsiCmnd->cdb[9];   /* LSB */

  /* NOTE(review): presumably agTRUE when LBA + TL exceeds the 28-bit
     addressable range — semantics live in satAddNComparebit32; confirm there */
  rangeChk = satAddNComparebit32(LBA, TL);

  lba = satComputeCDB12LBA(satIOContext);
  tl = satComputeCDB12TL(satIOContext);

  /* Table 34, 9.1, p 46 */
  /* note: As of 2/10/2006, no support for DMA QUEUED */

  /* Table 34, 9.1, p 46, b
     When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1),
     return check condition */
  if (pSatDevData->satNCQ != agTRUE &&
      pSatDevData->sat48BitSupport != agTRUE
      )
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satWrite12: return LBA out of range, not EXT\n"));
      return tiSuccess;
    }

    if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satWrite12: return LBA+TL out of range, not EXT\n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }
  }

  /* case 1 and 2: 28-bit commands, only when LBA + TL fits in 28 bits */
  if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 2 */
      /* WRITE DMA*/
      /* In case that we can't fit the transfer length, we loop */
      TI_DBG5(("satWrite12: case 2\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_DMA;          /* 0xCA */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[9];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
      satIOContext->ATACmd = SAT_WRITE_DMA;
    }
    else
    {
      /* case 1 */
      /* WRITE MULTIPLE or WRITE SECTOR(S) */
      /* WRITE SECTORS for easier implemetation */
      /* In case that we can't fit the transfer length, we loop */
      TI_DBG5(("satWrite12: case 1\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_SECTORS;      /* 0x30 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[9];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS;
    }
  }

  /* case 3 and 4: 48-bit (EXT) commands; may overwrite the FIS built above */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 3 */
      /* WRITE DMA EXT or WRITE DMA FUA EXT */
      TI_DBG5(("satWrite12: case 3\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */
      fis->h.command        = SAT_WRITE_DMA_EXT;      /* 0x35 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[9];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[8];       /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
      satIOContext->ATACmd = SAT_WRITE_DMA_EXT;
    }
    else
    {
      /* case 4 */
      /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */
      /* WRITE SECTORS EXT for easier implemetation */
      TI_DBG5(("satWrite12: case 4\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_WRITE_SECTORS_EXT;  /* 0x34 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[9];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[8];       /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT;
    }
  }

  /* case 5: NCQ-capable device, takes precedence over cases 1-4 */
  if (pSatDevData->satNCQ == agTRUE)
  {
    /* WRITE FPDMA QUEUED */
    if (pSatDevData->sat48BitSupport != agTRUE)
    {
      /* NCQ requires 48-bit support; treat the combination as an error */
      TI_DBG5(("satWrite12: case 5 !!! error NCQ but 28 bit address support \n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }

    TI_DBG6(("satWrite12: case 5\n"));

    /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */
    fis->h.features       = scsiCmnd->cdb[9];       /* FIS sector count (7:0) */
    fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */

    /* Check FUA bit */
    if (scsiCmnd->cdb[1] & SCSI_WRITE12_FUA_MASK)
      fis->d.device       = 0xC0;                   /* FIS FUA set */
    else
      fis->d.device       = 0x40;                   /* FIS FUA clear */

    fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.featuresExp    = scsiCmnd->cdb[8];       /* FIS sector count (15:8) */
    fis->d.sectorCount    = 0;                      /* Tag (7:3) set by LL layer */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE;
    satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED;
  }

  satIOContext->currentLBA = lba;
  satIOContext->OrgTL = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_WRITE_SECTORS_EXT ||
           fis->h.command == SAT_WRITE_DMA_EXT ||
           fis->h.command == SAT_WRITE_DMA_FUA_EXT
           )
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    /* SAT_WRITE_FPDMA_QUEUED */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }

  satIOContext->LoopNum = LoopNum;

  if (LoopNum == 1)
  {
    TI_DBG5(("satWrite12: NON CHAINED data\n"));
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satNonChainedDataIOCB;
  }
  else
  {
    TI_DBG1(("satWrite12: CHAINED data\n"));
    /* re-setting tl: first command of the chain carries the per-command maximum */
    if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA)
    {
      fis->d.sectorCount = 0xFF;
    }
    else if (fis->h.command == SAT_WRITE_SECTORS_EXT ||
             fis->h.command == SAT_WRITE_DMA_EXT ||
             fis->h.command == SAT_WRITE_DMA_FUA_EXT
             )
    {
      fis->d.sectorCount = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      /* SAT_WRITE_FPDMA_QUEUED: sector count lives in the features fields */
      fis->h.features = 0xFF;
      fis->d.featuresExp = 0xFF;
    }

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satChainedDataIOCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI WRITE10.
 *
 *  SAT implementation for SCSI WRITE10 and send FIS request to LL layer.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satWrite10(
                        tiRoot_t                  *tiRoot,
                        tiIORequest_t             *tiIORequest,
                        tiDeviceHandle_t          *tiDeviceHandle,
                        tiScsiInitiatorRequest_t  *tiScsiRequest,
                        satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;       /* number of chained ATA commands for this I/O */
  bit8                      LBA[4];            /* CDB LBA bytes, big-endian: 0 MSB, 3 LSB */
  bit8                      TL[4];             /* CDB transfer-length bytes, big-endian */
  bit32                     rangeChk = agFALSE; /* lba and tl range check */

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satWrite10: start\n"));

  /* checking FUA_NV: not supported -> CHECK CONDITION / INVALID FIELD IN CDB */
  if (scsiCmnd->cdb[1] & SCSI_FUA_NV_MASK)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWrite10: return FUA_NV\n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 is not supported */
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWrite10: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  LBA[0] = scsiCmnd->cdb[2];  /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];  /* LSB */

  TL[0] = 0;
  TL[1] = 0;
  TL[2] = scsiCmnd->cdb[7];   /* MSB */
  TL[3] = scsiCmnd->cdb[8];   /* LSB */

  /* NOTE(review): presumably agTRUE when LBA + TL exceeds the 28-bit
     addressable range — semantics live in satAddNComparebit32; confirm there */
  rangeChk = satAddNComparebit32(LBA, TL);

  /* cdb10; computing LBA and transfer length */
  lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2))
      + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];
  tl = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  /* cross-check the open-coded computation against the helper functions */
  TI_DBG5(("satWrite10: lba %d functioned lba %d\n", lba, satComputeCDB10LBA(satIOContext)));
  TI_DBG5(("satWrite10: tl %d functioned tl %d\n", tl, satComputeCDB10TL(satIOContext)));

  /* Table 34, 9.1, p 46 */
  /* note: As of 2/10/2006, no support for DMA QUEUED */

  /* Table 34, 9.1, p 46, b
     When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1),
     return check condition */
  if (pSatDevData->satNCQ != agTRUE &&
      pSatDevData->sat48BitSupport != agTRUE
      )
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satWrite10: return LBA out of range, not EXT\n"));
      TI_DBG1(("satWrite10: cdb 0x%x 0x%x 0x%x 0x%x\n",scsiCmnd->cdb[2], scsiCmnd->cdb[3],
               scsiCmnd->cdb[4], scsiCmnd->cdb[5]));
      TI_DBG1(("satWrite10: lba 0x%x SAT_TR_LBA_LIMIT 0x%x\n", lba, SAT_TR_LBA_LIMIT));
      return tiSuccess;
    }

    if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satWrite10: return LBA+TL out of range, not EXT\n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }
  }

  /* case 1 and 2: 28-bit commands, only when LBA + TL fits in 28 bits */
  if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 2 */
      /* WRITE DMA*/
      /* can't fit the transfer length */
      TI_DBG5(("satWrite10: case 2\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_DMA;          /* 0xCA */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
      satIOContext->ATACmd = SAT_WRITE_DMA;
    }
    else
    {
      /* case 1 */
      /* WRITE MULTIPLE or WRITE SECTOR(S) */
      /* WRITE SECTORS for easier implemetation */
      /* can't fit the transfer length */
      TI_DBG5(("satWrite10: case 1\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_SECTORS;      /* 0x30 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS;
    }
  }

  /* case 3 and 4: 48-bit (EXT) commands; may overwrite the FIS built above */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 3 */
      /* WRITE DMA EXT or WRITE DMA FUA EXT */
      TI_DBG5(("satWrite10: case 3\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */
      fis->h.command        = SAT_WRITE_DMA_EXT;      /* 0x35 */
      satIOContext->ATACmd  = SAT_WRITE_DMA_EXT;

      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
    }
    else
    {
      /* case 4 */
      /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */
      /* WRITE SECTORS EXT for easier implemetation */
      TI_DBG5(("satWrite10: case 4\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_WRITE_SECTORS_EXT;  /* 0x34 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT;
    }
  }

  /* case 5: NCQ-capable device, takes precedence over cases 1-4 */
  if (pSatDevData->satNCQ == agTRUE)
  {
    /* WRITE FPDMA QUEUED */
    if (pSatDevData->sat48BitSupport != agTRUE)
    {
      /* NCQ requires 48-bit support; treat the combination as an error */
      TI_DBG5(("satWrite10: case 5 !!! error NCQ but 28 bit address support \n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }

    TI_DBG6(("satWrite10: case 5\n"));

    /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */
    fis->h.features       = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
    fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */

    /* Check FUA bit */
    if (scsiCmnd->cdb[1] & SCSI_WRITE10_FUA_MASK)
      fis->d.device       = 0xC0;                   /* FIS FUA set */
    else
      fis->d.device       = 0x40;                   /* FIS FUA clear */

    fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.featuresExp    = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
    fis->d.sectorCount    = 0;                      /* Tag (7:3) set by LL layer */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE;
    satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED;
  }

  //  tdhexdump("satWrite10 final fis", (bit8 *)fis, sizeof(agsaFisRegHostToDevice_t));

  satIOContext->currentLBA = lba;
  satIOContext->OrgTL = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_WRITE_SECTORS_EXT ||
           fis->h.command == SAT_WRITE_DMA_EXT ||
           fis->h.command == SAT_WRITE_DMA_FUA_EXT
           )
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    /* SAT_WRITE_FPDMA_QUEUED */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }

  satIOContext->LoopNum = LoopNum;

  if (LoopNum == 1)
  {
    TI_DBG5(("satWrite10: NON CHAINED data\n"));
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satNonChainedDataIOCB;
  }
  else
  {
    TI_DBG1(("satWrite10: CHAINED data\n"));
    /* re-setting tl: first command of the chain carries the per-command maximum */
    if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA)
    {
      fis->d.sectorCount = 0xFF;
    }
    else if (fis->h.command == SAT_WRITE_SECTORS_EXT ||
             fis->h.command == SAT_WRITE_DMA_EXT ||
             fis->h.command == SAT_WRITE_DMA_FUA_EXT
             )
    {
      fis->d.sectorCount = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      /* SAT_WRITE_FPDMA_QUEUED: sector count lives in the features fields */
      fis->h.features = 0xFF;
      fis->d.featuresExp = 0xFF;
    }

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satChainedDataIOCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satWrite_1.
 *
 *  SAT implementation for SCSI WRITE10 and send FIS request to LL layer.
 *  This is used when WRITE10 is divided into multiple ATA commands
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
* - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satWrite_1( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* Assumption: error check on lba and tl has been done in satWrite*() lba = lba + tl; */ bit32 status; satIOContext_t *satOrgIOContext = agNULL; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; bit32 lba = 0; bit32 DenomTL = 0xFF; bit32 Remainder = 0; bit8 LBA[4]; /* 0 MSB, 3 LSB */ TI_DBG2(("satWrite_1: start\n")); fis = satIOContext->pFis; satOrgIOContext = satIOContext->satOrgIOContext; scsiCmnd = satOrgIOContext->pScsiCmnd; osti_memset(LBA,0, sizeof(LBA)); switch (satOrgIOContext->ATACmd) { case SAT_WRITE_DMA: DenomTL = 0xFF; break; case SAT_WRITE_SECTORS: DenomTL = 0xFF; break; case SAT_WRITE_DMA_EXT: DenomTL = 0xFFFF; break; case SAT_WRITE_DMA_FUA_EXT: DenomTL = 0xFFFF; break; case SAT_WRITE_SECTORS_EXT: DenomTL = 0xFFFF; break; case SAT_WRITE_FPDMA_QUEUED: DenomTL = 0xFFFF; break; default: TI_DBG1(("satWrite_1: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } Remainder = satOrgIOContext->OrgTL % DenomTL; satOrgIOContext->currentLBA = satOrgIOContext->currentLBA + DenomTL; lba = satOrgIOContext->currentLBA; LBA[0] = (bit8)((lba & 0xF000) >> (8 * 3)); /* MSB */ LBA[1] = (bit8)((lba & 0xF00) >> (8 * 2)); LBA[2] = (bit8)((lba & 0xF0) >> 8); LBA[3] = (bit8)(lba & 0xF); /* LSB */ switch (satOrgIOContext->ATACmd) { case SAT_WRITE_DMA: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_DMA; /* 0xCA */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ 
/* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; break; case SAT_WRITE_SECTORS: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_SECTORS; /* 0x30 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; break; case SAT_WRITE_DMA_EXT: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x3D */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if 
(satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; break; case SAT_WRITE_SECTORS_EXT: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; break; case SAT_WRITE_FPDMA_QUEUED: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE10_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA 
clear */ fis->d.lbaLowExp = LBA[0];; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->h.features = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.featuresExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->h.features = 0xFF; /* FIS sector count (7:0) */ fis->d.featuresExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; break; default: TI_DBG1(("satWrite_1: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } /* Initialize CB for SATA completion. */ /* chained data */ satIOContext->satCompleteCB = &satChainedDataIOCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satWrite_1: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI WRITE6. * * SAT implementation for SCSI WRITE6 and send FIS request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
 */
/*****************************************************************************/
GLOBAL bit32 satWrite6(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
   * Translates SCSI WRITE(6) into the best ATA write the device supports:
   * WRITE DMA / WRITE SECTORS (28-bit), WRITE DMA EXT / WRITE SECTORS EXT
   * (48-bit), or WRITE FPDMA QUEUED (NCQ).
   *
   * NOTE(review): the capability blocks below ("case 1 and 2", "case 3 and 4",
   * "case 5") are independent ifs, not else-ifs; when a device matches more
   * than one, the later block overwrites the FIS built by an earlier one, so
   * the most capable matching command wins.  Preserved as written.
   */
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
  satDeviceData_t          *pSatDevData;
  scsiRspSense_t           *pSense;
  tiIniScsiCmnd_t          *scsiCmnd;
  agsaFisRegHostToDevice_t *fis;
  bit32                     lba = 0;
  bit16                     tl = 0;

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satWrite6: start\n"));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 -> unsupported; fail with ILLEGAL REQUEST */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWrite6: return control\n"));
    return tiSuccess;
  }

  /* cbd6; computing LBA and transfer length */
  /* WRITE(6): 21-bit LBA in cdb[1] bits 4:0 + cdb[2..3]; 8-bit TL in cdb[4] */
  lba = (((scsiCmnd->cdb[1]) & 0x1f) << (8*2))
        + (scsiCmnd->cdb[2] << 8) + scsiCmnd->cdb[3];
  tl = scsiCmnd->cdb[4];

  /* Table 34, 9.1, p 46 */
  /* note: As of 2/10/2006, no support for DMA QUEUED */

  /*
    Table 34, 9.1, p 46, b
    When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1),
    return check condition
  */
  if (pSatDevData->satNCQ != agTRUE &&
      pSatDevData->sat48BitSupport != agTRUE
      )
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satWrite6: return LBA out of range\n"));
      return tiSuccess;
    }
  }

  /* case 1 and 2: 28-bit commands suffice */
  if (lba + tl <= SAT_TR_LBA_LIMIT)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 2 */
      /* WRITE DMA*/
      TI_DBG5(("satWrite6: case 2\n"));
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
      fis->h.command        = SAT_WRITE_DMA;        /* 0xCA */
      fis->h.features       = 0;                    /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[3];     /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[2];     /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                 /* FIS LBA mode */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      if (tl == 0)
      {
        /* temporary fix */
        /* NOTE(review): WRITE(6) tl==0 means 256 blocks; 0xff requests 255.
           ATA sector count 0 would mean 256 — looks like a workaround left
           in deliberately; confirm before changing. */
        fis->d.sectorCount  = 0xff;                 /* FIS sector count (7:0) */
      }
      else
      {
        fis->d.sectorCount  = scsiCmnd->cdb[4];     /* FIS sector count (7:0) */
      }
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
    }
    else
    {
      /* case 1 */
      /* WRITE SECTORS for easier implemetation */
      TI_DBG5(("satWrite6: case 1\n"));
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
      fis->h.command        = SAT_WRITE_SECTORS;    /* 0x30 */
      fis->h.features       = 0;                    /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[3];     /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[2];     /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                 /* FIS LBA mode */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      if (tl == 0)
      {
        /* temporary fix (see NOTE in case 2) */
        fis->d.sectorCount  = 0xff;                 /* FIS sector count (7:0) */
      }
      else
      {
        fis->d.sectorCount  = scsiCmnd->cdb[4];     /* FIS sector count (7:0) */
      }
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
    }
  }

  /* case 3 and 4: 48-bit addressing supported */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 3 */
      /* WRITE DMA EXT only */
      TI_DBG5(("satWrite6: case 3\n"));
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
      fis->h.command        = SAT_WRITE_DMA_EXT;    /* 0x35 */
      fis->h.features       = 0;                    /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[3];     /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[2];     /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                 /* FIS LBA mode set */
      fis->d.lbaLowExp      = 0;                    /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                    /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                    /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                    /* FIS reserve */
      if (tl == 0)
      {
        /* sector count is 256, 0x100*/
        fis->d.sectorCount    = 0;                  /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0x01;               /* FIS sector count (15:8) */
      }
      else
      {
        fis->d.sectorCount    = scsiCmnd->cdb[4];   /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0;                  /* FIS sector count (15:8) */
      }
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
    }
    else
    {
      /* case 4 */
      /* WRITE SECTORS EXT for easier implemetation */
      TI_DBG5(("satWrite6: case 4\n"));
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
      fis->h.command        = SAT_WRITE_SECTORS_EXT; /* 0x34 */
      fis->h.features       = 0;                    /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[3];     /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[2];     /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                 /* FIS LBA mode set */
      fis->d.lbaLowExp      = 0;                    /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                    /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                    /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                    /* FIS reserve */
      if (tl == 0)
      {
        /* sector count is 256, 0x100*/
        fis->d.sectorCount    = 0;                  /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0x01;               /* FIS sector count (15:8) */
      }
      else
      {
        fis->d.sectorCount    = scsiCmnd->cdb[4];   /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0;                  /* FIS sector count (15:8) */
      }
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
    }
  }

  /* case 5: NCQ */
  if (pSatDevData->satNCQ == agTRUE)
  {
    /* WRITE FPDMA QUEUED */
    if (pSatDevData->sat48BitSupport != agTRUE)
    {
      /* sanity check */
      TI_DBG5(("satWrite6: case 5 !!! error NCQ but 28 bit address support \n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    TI_DBG5(("satWrite6: case 5\n"));

    /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */
    fis->d.lbaLow         = scsiCmnd->cdb[3];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[2];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = (bit8)((scsiCmnd->cdb[1]) & 0x1f); /* FIS LBA (23:16) */
    fis->d.device         = 0x40;                   /* FIS FUA clear */
    fis->d.lbaLowExp      = 0;                      /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    /* NCQ carries the sector count in the features fields. */
    if (tl == 0)
    {
      /* sector count is 256, 0x100*/
      fis->h.features     = 0;                      /* FIS sector count (7:0) */
      fis->d.featuresExp  = 0x01;                   /* FIS sector count (15:8) */
    }
    else
    {
      fis->h.features     = scsiCmnd->cdb[4];       /* FIS sector count (7:0) */
      fis->d.featuresExp  = 0;                      /* FIS sector count (15:8) */
    }
    fis->d.sectorCount    = 0;                      /* Tag (7:3) set by LL layer */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE;
  }

  /* Initialize CB for SATA completion.
  */
  satIOContext->satCompleteCB = &satNonChainedDataIOCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI TEST UNIT READY.
 *
 *  SAT implementation for SCSI TUR and send FIS request to LL layer.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satTestUnitReady(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
   * TEST UNIT READY translation (SAT rev 8, 8.11.2): reject while stopped,
   * formatting, or after a device fault; for removable media poll with
   * GET MEDIA STATUS, otherwise fall through to ATA CHECK POWER MODE via
   * satTestUnitReady_1().
   */
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t          *pSatDevData;
  scsiRspSense_t           *pSense;
  tiIniScsiCmnd_t          *scsiCmnd;
  agsaFisRegHostToDevice_t *fis;

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG6(("satTestUnitReady: entry tiDeviceHandle=%p tiIORequest=%p\n",
           tiDeviceHandle, tiIORequest));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 -> unsupported; fail with ILLEGAL REQUEST */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satTestUnitReady: return control\n"));
    return tiSuccess;
  }

  /* SAT revision 8, 8.11.2, p42: device stopped -> NOT READY */
  if (pSatDevData->satStopState == agTRUE)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_NOT_READY,
                        0,
                        SCSI_SNSCODE_LOGICAL_UNIT_NOT_READY_INITIALIZING_COMMAND_REQUIRED,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satTestUnitReady: stop state\n"));
    return tiSuccess;
  }

  /*
   * Check if format is in progress
   */
  if (pSatDevData->satDriveState == SAT_DEV_STATE_FORMAT_IN_PROGRESS)
  {
    TI_DBG1(("satTestUnitReady() FORMAT_IN_PROGRESS tiDeviceHandle=%p tiIORequest=%p\n",
             tiDeviceHandle, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_NOT_READY,
                        0,
                        SCSI_SNSCODE_LOGICAL_UNIT_NOT_READY_FORMAT_IN_PROGRESS,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satTestUnitReady: format in progress\n"));
    return tiSuccess;
  }

  /*
   * check previously issued ATA command: a prior device fault makes the
   * unit not ready until recovered.
   */
  if (pSatDevData->satPendingIO != 0)
  {
    if (pSatDevData->satDeviceFaultState == agTRUE)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_HARDWARE_ERROR,
                          0,
                          SCSI_SNSCODE_LOGICAL_UNIT_FAILURE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satTestUnitReady: previous command ended in error\n"));
      return tiSuccess;
    }
  }

  /* check removalbe media feature set */
  if(pSatDevData->satRemovableMedia && pSatDevData->satRemovableMediaEnabled)
  {
    TI_DBG5(("satTestUnitReady: sending get media status cmnd\n"));
    /* send GET MEDIA STATUS command */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_GET_MEDIA_STATUS;   /* 0xDA */
    fis->h.features       = 0;                      /* FIS features NA */
    fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
    fis->d.device         = 0;                      /* FIS DEV is discarded in SATA */
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

    /* Initialize CB for SATA completion.
    */
    satIOContext->satCompleteCB = &satTestUnitReadyCB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */

    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);
    return (status);
  }

  /* number 6) in SAT p42 send ATA CHECK POWER MODE */
  TI_DBG5(("satTestUnitReady: sending check power mode cmnd\n"));
  status = satTestUnitReady_1( tiRoot,
                               tiIORequest,
                               tiDeviceHandle,
                               tiScsiRequest,
                               satIOContext);
  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satTestUnitReady_1.
 *
 *  SAT implementation for SCSI satTestUnitReady_1.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satTestUnitReady_1(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
    sends SAT_CHECK_POWER_MODE as a part of TESTUNITREADY
    internally generated - no directly corresponding scsi
    called in satIOCompleted as a part of satTestUnitReady(),
    SAT, revision8, 8.11.2, p42
  */
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t *fis;

  fis = satIOContext->pFis;

  TI_DBG5(("satTestUnitReady_1: start\n"));

  /*
   * Send the ATA CHECK POWER MODE command.
   */
  fis->h.fisType        = 0x27;                   /* Reg host to device */
  fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
  fis->h.command        = SAT_CHECK_POWER_MODE;   /* 0xE5 */
  fis->h.features       = 0;
  fis->d.lbaLow         = 0;
  fis->d.lbaMid         = 0;
  fis->d.lbaHigh        = 0;
  fis->d.device         = 0;
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0;                      /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion.
  */
  satIOContext->satCompleteCB = &satTestUnitReadyCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  TI_DBG5(("satTestUnitReady_1: return\n"));

  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReportLun.
 *
 *  SAT implementation for SCSI satReportLun. Only LUN0 is reported.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satReportLun(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /* Completes REPORT LUNS locally: always reports a single LUN 0. */
  scsiRspSense_t           *pSense;
  bit32                     allocationLen;
  bit32                     reportLunLen;
  scsiReportLun_t          *pReportLun;
  tiIniScsiCmnd_t          *scsiCmnd;

  TI_DBG5(("satReportLun entry: tiDeviceHandle=%p tiIORequest=%p\n",
           tiDeviceHandle, tiIORequest));
  pSense     = satIOContext->pSense;
  pReportLun = (scsiReportLun_t *) tiScsiRequest->sglVirtualAddr;
  scsiCmnd   = &tiScsiRequest->scsiCmnd;

  //  tdhexdump("satReportLun cdb", (bit8 *)scsiCmnd, 16);

  /* Find the buffer size allocated by Initiator (CDB bytes 6..9, big-endian) */
  allocationLen = (((bit32)scsiCmnd->cdb[6]) << 24) |
                  (((bit32)scsiCmnd->cdb[7]) << 16) |
                  (((bit32)scsiCmnd->cdb[8]) << 8 ) |
                  (((bit32)scsiCmnd->cdb[9])      );
  reportLunLen  = 16;     /* 8 byte header and 8 bytes of LUN0 */

  if (allocationLen < reportLunLen)
  {
    TI_DBG1(("satReportLun *** ERROR *** insufficient len=0x%x tiDeviceHandle=%p tiIORequest=%p\n",
        reportLunLen, tiDeviceHandle, tiIORequest));

    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    return tiSuccess;
  }

  /* Set length to one entry */
  pReportLun->len[0] = 0;
  pReportLun->len[1] = 0;
  pReportLun->len[2] = 0;
  pReportLun->len[3] = sizeof (tiLUN_t);

  pReportLun->reserved = 0;

  /* Set to LUN 0:
   * - address method to 0x00: Peripheral device addressing method,
   * - bus identifier to 0
   */
  pReportLun->lunList[0].lun[0] = 0;
  pReportLun->lunList[0].lun[1] = 0;
  pReportLun->lunList[0].lun[2] = 0;
  pReportLun->lunList[0].lun[3] = 0;
  pReportLun->lunList[0].lun[4] = 0;
  pReportLun->lunList[0].lun[5] = 0;
  pReportLun->lunList[0].lun[6] = 0;
  pReportLun->lunList[0].lun[7] = 0;

  if (allocationLen > reportLunLen)
  {
    /* underrun: buffer bigger than the 16 bytes returned */
    TI_DBG1(("satReportLun reporting underrun reportLunLen=0x%x allocationLen=0x%x \n",
        reportLunLen, allocationLen));

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOUnderRun,
                              allocationLen - reportLunLen,
                              agNULL,
                              satIOContext->interruptContext );
  }
  else
  {
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
  }
  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI REQUEST SENSE.
 *
 *  SAT implementation for SCSI REQUEST SENSE.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satRequestSense(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
    SAT Rev 8 p38, Table25
    sending SMART RETURN STATUS
    Checking SMART Treshold Exceeded Condition is done in satRequestSenseCB()
    Only fixed format sense data is supported.
    In other words, we don't support DESC bit set in Request Sense.
  */
  bit32                     status;
  bit32                     agRequestType;
  scsiRspSense_t           *pSense;
  satDeviceData_t          *pSatDevData;
  tiIniScsiCmnd_t          *scsiCmnd;
  agsaFisRegHostToDevice_t *fis;
  tdIORequestBody_t        *tdIORequestBody;
  satInternalIo_t          *satIntIo = agNULL;
  satIOContext_t           *satIOContext2;

  TI_DBG4(("satRequestSense entry: tiDeviceHandle=%p tiIORequest=%p\n",
      tiDeviceHandle, tiIORequest));

  /* NOTE: pSense points into the initiator's REQUEST SENSE data buffer,
     not satIOContext->pSense as in the other translation routines. */
  pSense      = (scsiRspSense_t *) tiScsiRequest->sglVirtualAddr;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG4(("satRequestSense: pSatDevData=%p\n", pSatDevData));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 -> unsupported; fail with ILLEGAL REQUEST */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satRequestSense: return control\n"));
    return tiSuccess;
  }

  /*
    Only fixed format sense data is supported.
    In other words, we don't support DESC bit set in Request Sense.
    NOTE(review): the mask used here is named ATA_REMOVABLE_MEDIA_DEVICE_MASK;
    based on the message below it is expected to select the DESC bit of
    cdb[1] (bit 0) — verify the macro's value matches that intent.
  */
  if ( scsiCmnd->cdb[1] & ATA_REMOVABLE_MEDIA_DEVICE_MASK )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satRequestSense: DESC bit is set, which we don't support\n"));
    return tiSuccess;
  }

  if (pSatDevData->satSMARTEnabled == agTRUE)
  {
    /* sends SMART RETURN STATUS; the CB inspects the threshold-exceeded
       signature (lbaMid/lbaHigh) and builds the sense data. */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_SMART_RETURN_STATUS;/* 0xB0 */
    fis->h.features       = 0xDA;                   /* FIS features: RETURN STATUS subcommand */
    fis->d.featuresExp    = 0;                      /* FIS reserve */
    fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;                      /* FIS sector count (15:8) */
    fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
    fis->d.lbaLowExp      = 0;                      /* FIS LBA (31:24) */
    fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ): SMART signature */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16): SMART signature */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.device         = 0;                      /* FIS DEV is discarded in SATA */
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved4      = 0;
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

    /* Initialize CB for SATA completion.
    */
    satIOContext->satCompleteCB = &satRequestSenseCB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */

    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);

    TI_DBG4(("satRequestSense: if return, status %d\n", status));
    return (status);
  }
  else
  {
    /*
      allocate an internal IO context for xmitting SAT_CHECK_POWER_MODE,
      then chain into satRequestSense_1()
    */
    TI_DBG4(("satRequestSense: before satIntIo %p\n", satIntIo));
    /* allocate iocontext */
    satIntIo = satAllocIntIoResource( tiRoot,
                                      tiIORequest, /* original request */
                                      pSatDevData,
                                      tiScsiRequest->scsiCmnd.expDataLength,
                                      satIntIo);

    TI_DBG4(("satRequestSense: after satIntIo %p\n", satIntIo));

    if (satIntIo == agNULL)
    {
      /* memory allocation failure */
      satFreeIntIoResource( tiRoot,
                            pSatDevData,
                            satIntIo);

      /* failed during sending SMART RETURN STATUS */
      /* NOTE(review): completes with SCSI_STAT_GOOD while setting an
         impending-failure sense code — looks intentional (best-effort),
         confirm against satRequestSenseCB expectations. */
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_NO_SENSE,
                          0,
                          SCSI_SNSCODE_HARDWARE_IMPENDING_FAILURE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext );

      TI_DBG4(("satRequestSense: else fail 1\n"));
      return tiSuccess;
    } /* end of memory allocation failure */

    /*
     * Need to initialize all the fields within satIOContext except
     * reqType and satCompleteCB which will be set depending on cmd.
     */
    if (satIntIo == agNULL)
    {
      TI_DBG4(("satRequestSense: satIntIo is NULL\n"));
    }
    else
    {
      TI_DBG4(("satRequestSense: satIntIo is NOT NULL\n"));
    }
    /* wire the internal request's IO context to the original request */
    satIntIo->satOrgTiIORequest = tiIORequest;
    tdIORequestBody = (tdIORequestBody_t *)satIntIo->satIntRequestBody;
    satIOContext2 = &(tdIORequestBody->transport.SATA.satIOContext);

    satIOContext2->pSatDevData   = pSatDevData;
    satIOContext2->pFis          = &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
    satIOContext2->pScsiCmnd     = &(satIntIo->satIntTiScsiXchg.scsiCmnd);
    satIOContext2->pSense        = &(tdIORequestBody->transport.SATA.sensePayload);
    satIOContext2->pTiSenseData  = &(tdIORequestBody->transport.SATA.tiSenseData);
    satIOContext2->pTiSenseData->senseData = satIOContext2->pSense;
    satIOContext2->tiRequestBody = satIntIo->satIntRequestBody;
    satIOContext2->interruptContext = satIOContext->interruptContext;
    satIOContext2->satIntIoContext  = satIntIo;
    satIOContext2->ptiDeviceHandle  = tiDeviceHandle;
    satIOContext2->satOrgIOContext  = satIOContext;

    TI_DBG4(("satRequestSense: satIntIo->satIntTiScsiXchg.agSgl1.len %d\n",
        satIntIo->satIntTiScsiXchg.agSgl1.len));
    TI_DBG4(("satRequestSense: satIntIo->satIntTiScsiXchg.agSgl1.upper %d\n",
        satIntIo->satIntTiScsiXchg.agSgl1.upper));
    TI_DBG4(("satRequestSense: satIntIo->satIntTiScsiXchg.agSgl1.lower %d\n",
        satIntIo->satIntTiScsiXchg.agSgl1.lower));
    TI_DBG4(("satRequestSense: satIntIo->satIntTiScsiXchg.agSgl1.type %d\n",
        satIntIo->satIntTiScsiXchg.agSgl1.type));

    status = satRequestSense_1( tiRoot,
                                &(satIntIo->satIntTiIORequest),
                                tiDeviceHandle,
                                &(satIntIo->satIntTiScsiXchg),
                                satIOContext2);

    if (status != tiSuccess)
    {
      satFreeIntIoResource( tiRoot,
                            pSatDevData,
                            satIntIo);

      /* failed during sending SMART RETURN STATUS */
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_NO_SENSE,
                          0,
                          SCSI_SNSCODE_HARDWARE_IMPENDING_FAILURE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                agNULL,
                                satIOContext->interruptContext );

      TI_DBG1(("satRequestSense: else fail 2\n"));
      return tiSuccess;
    }
    TI_DBG4(("satRequestSense: else return success\n"));
    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI REQUEST SENSE.
 *
 *  SAT implementation for SCSI REQUEST SENSE.
 *  Sub function of satRequestSense
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 satRequestSense_1(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
    sends SAT_CHECK_POWER_MODE
  */
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t *fis;

  TI_DBG4(("satRequestSense_1 entry: tiDeviceHandle=%p tiIORequest=%p\n",
      tiDeviceHandle, tiIORequest));
  fis = satIOContext->pFis;

  /*
   * Send the ATA CHECK POWER MODE command.
   */
  fis->h.fisType        = 0x27;                   /* Reg host to device */
  fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
  fis->h.command        = SAT_CHECK_POWER_MODE;   /* 0xE5 */
  fis->h.features       = 0;
  fis->d.lbaLow         = 0;
  fis->d.lbaMid         = 0;
  fis->d.lbaHigh        = 0;
  fis->d.device         = 0;
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0;                      /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion.
  */
  satIOContext->satCompleteCB = &satRequestSenseCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  TI_DBG4(("satRequestSense_1: agSgl1.len %d\n",
      tiScsiRequest->agSgl1.len));
  TI_DBG4(("satRequestSense_1: agSgl1.upper %d\n",
      tiScsiRequest->agSgl1.upper));
  TI_DBG4(("satRequestSense_1: agSgl1.lower %d\n",
      tiScsiRequest->agSgl1.lower));
  TI_DBG4(("satRequestSense_1: agSgl1.type %d\n",
      tiScsiRequest->agSgl1.type));

  //  tdhexdump("satRequestSense_1", (bit8 *)fis, sizeof(agsaFisRegHostToDevice_t));

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI INQUIRY.
 *
 *  SAT implementation for SCSI INQUIRY.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satInquiry(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /* CMDDT bit is obsolete in SPC-3 and this is assumed in SAT revision 8 */
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  satDeviceData_t           *pSatDevData;
  bit32                      status;

  TI_DBG5(("satInquiry: start\n"));
  TI_DBG5(("satInquiry entry: tiDeviceHandle=%p tiIORequest=%p\n",
    tiDeviceHandle, tiIORequest));
  pSense      = satIOContext->pSense;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pSatDevData = satIOContext->pSatDevData;
  TI_DBG5(("satInquiry: pSatDevData=%p\n", pSatDevData));
  //tdhexdump("satInquiry", (bit8 *)scsiCmnd->cdb, 6);

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  /* Neither NACA nor linked commands are supported: reject with
   * ILLEGAL REQUEST / INVALID FIELD IN CDB. */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satInquiry: return control\n"));
    return tiSuccess;
  }

  /* checking EVPD and Allocation Length */
  /* SPC-4 spec 6.4 p141 */
  /* EVPD bit == 0 && PAGE CODE != 0 */
  /* A page code may only be specified when EVPD is set. */
  if ( !(scsiCmnd->cdb[1] & SCSI_EVPD_MASK) &&
       (scsiCmnd->cdb[2] != 0)
       )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satInquiry: return EVPD and PAGE CODE\n"));
    return tiSuccess;
  }

  TI_DBG6(("satInquiry: allocation length 0x%x %d\n",
    ((scsiCmnd->cdb[3]) << 8) + scsiCmnd->cdb[4],
    ((scsiCmnd->cdb[3]) << 8) + scsiCmnd->cdb[4]));

  /* convert OS IO to TD internal IO */
  /* If we do not yet have cached IDENTIFY DEVICE data for this target, an
   * internal identify sequence is started first; otherwise the INQUIRY is
   * answered immediately from the cached data via satInquiryIntCB. */
  if ( pSatDevData->IDDeviceValid == agFALSE)
  {
    status = satStartIDDev( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext
                            );
    TI_DBG6(("satInquiry: end status %d\n", status));
    return status;
  }
  else
  {
    TI_DBG6(("satInquiry: calling satInquiryIntCB\n"));
    satInquiryIntCB( tiRoot,
                     tiIORequest,
                     tiDeviceHandle,
                     tiScsiRequest,
                     satIOContext
                     );
    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReadCapacity10.
 *
 *  SAT implementation for SCSI satReadCapacity10.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satReadCapacity10(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  bit8                      *pVirtAddr;       /* READ CAPACITY(10) response buffer */
  satDeviceData_t           *pSatDevData;
  agsaSATAIdentifyData_t    *pSATAIdData;     /* cached ATA IDENTIFY DEVICE data */
  bit32                     lastLba;
  bit32                     word117_118;
  bit32                     word117;
  bit32                     word118;

  TI_DBG5(("satReadCapacity10: start: tiDeviceHandle=%p tiIORequest=%p\n",
    tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  pVirtAddr   = (bit8 *) tiScsiRequest->sglVirtualAddr;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pSatDevData = satIOContext->pSatDevData;
  pSATAIdData = &pSatDevData->satIdentifyData;

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satReadCapacity10: return control\n"));
    return tiSuccess;
  }

  /*
   * If Logical block address is not set to zero, return error
   */
  if ((scsiCmnd->cdb[2] || scsiCmnd->cdb[3] || scsiCmnd->cdb[4] || scsiCmnd->cdb[5]))
  {
    TI_DBG1(("satReadCapacity10 *** ERROR *** logical address non zero, tiDeviceHandle=%p tiIORequest=%p\n",
        tiDeviceHandle, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /*
   * If PMI bit is not zero, return error
   */
  if ( ((scsiCmnd->cdb[8]) & SCSI_READ_CAPACITY10_PMI_MASK) != 0 )
  {
    TI_DBG1(("satReadCapacity10 *** ERROR *** PMI is not zero, tiDeviceHandle=%p tiIORequest=%p\n",
        tiDeviceHandle, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /*
    filling in Read Capacity parameter data
    saved identify device has been already flipped
    See ATA spec p125 and p136 and SBC spec p54
  */
  /*
   * If 48-bit addressing is supported, set capacity information from Identify
   * Device Word 100-103.
   */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    /*
     * Setting RETURNED LOGICAL BLOCK ADDRESS in READ CAPACITY(10) response data:
     * SBC-2 specifies that if the capacity exceeded the 4-byte RETURNED LOGICAL
     * BLOCK ADDRESS in READ CAPACITY(10) parameter data, the RETURNED LOGICAL
     * BLOCK ADDRESS should be set to 0xFFFFFFFF so the application client would
     * then issue a READ CAPACITY(16) command.
     */
    /* ATA Identify Device information word 100 - 103 */
    if ( (pSATAIdData->maxLBA32_47 != 0 ) || (pSATAIdData->maxLBA48_63 != 0))
    {
      pVirtAddr[0] = 0xFF;        /* MSB number of block */
      pVirtAddr[1] = 0xFF;
      pVirtAddr[2] = 0xFF;
      pVirtAddr[3] = 0xFF;        /* LSB number of block */
      TI_DBG1(("satReadCapacity10: returns 0xFFFFFFFF\n"));
    }
    else  /* Fit the Readcapacity10 4-bytes response length */
    {
      lastLba = (((pSATAIdData->maxLBA16_31) << 16) ) |
                  (pSATAIdData->maxLBA0_15);
      lastLba = lastLba - 1;      /* LBA starts from zero */

      /*
        for testing
      lastLba = lastLba - (512*10) - 1;
      */

      pVirtAddr[0] = (bit8)((lastLba >> 24) & 0xFF);    /* MSB */
      pVirtAddr[1] = (bit8)((lastLba >> 16) & 0xFF);
      pVirtAddr[2] = (bit8)((lastLba >> 8)  & 0xFF);
      pVirtAddr[3] = (bit8)((lastLba )      & 0xFF);    /* LSB */

      TI_DBG3(("satReadCapacity10: lastLba is 0x%x %d\n", lastLba, lastLba));
      TI_DBG3(("satReadCapacity10: LBA 0 is 0x%x %d\n", pVirtAddr[0], pVirtAddr[0]));
      TI_DBG3(("satReadCapacity10: LBA 1 is 0x%x %d\n", pVirtAddr[1], pVirtAddr[1]));
      TI_DBG3(("satReadCapacity10: LBA 2 is 0x%x %d\n", pVirtAddr[2], pVirtAddr[2]));
      TI_DBG3(("satReadCapacity10: LBA 3 is 0x%x %d\n", pVirtAddr[3], pVirtAddr[3]));
    }
  }
  /*
   * For 28-bit addressing, set capacity information from Identify
   * Device Word 60-61.
   */
  else
  {
    /* ATA Identify Device information word 60 - 61 */
    lastLba = (((pSATAIdData->numOfUserAddressableSectorsHi) << 16) ) |
                (pSATAIdData->numOfUserAddressableSectorsLo);
    lastLba = lastLba - 1;      /* LBA starts from zero */

    pVirtAddr[0] = (bit8)((lastLba >> 24) & 0xFF);    /* MSB */
    pVirtAddr[1] = (bit8)((lastLba >> 16) & 0xFF);
    pVirtAddr[2] = (bit8)((lastLba >> 8)  & 0xFF);
    pVirtAddr[3] = (bit8)((lastLba )      & 0xFF);    /* LSB */
  }

  /* SAT Rev 8d */
  /* Identify word 106 bit 12 selects whether a non-512 logical sector size
   * is reported in words 117-118. */
  if (((pSATAIdData->word104_107[2]) & 0x1000) == 0)
  {
    TI_DBG5(("satReadCapacity10: Default Block Length is 512\n"));
    /*
     * Set the block size, fixed at 512 bytes.
     */
    pVirtAddr[4] = 0x00;        /* MSB block size in bytes */
    pVirtAddr[5] = 0x00;
    pVirtAddr[6] = 0x02;
    pVirtAddr[7] = 0x00;        /* LSB block size in bytes */
  }
  else
  {
    word118 = pSATAIdData->word112_126[6];
    word117 = pSATAIdData->word112_126[5];

    word117_118 = (word118 << 16) + word117;
    /* NOTE(review): words 117-118 appear to express the sector size in
     * 16-bit words; the *2 converts it to bytes — confirm against ATA spec. */
    word117_118 = word117_118 * 2;
    pVirtAddr[4] = (bit8)((word117_118 >> 24) & 0xFF);        /* MSB block size in bytes */
    pVirtAddr[5] = (bit8)((word117_118 >> 16) & 0xFF);
    pVirtAddr[6] = (bit8)((word117_118 >> 8) & 0xFF);
    pVirtAddr[7] = (bit8)(word117_118 & 0xFF);                /* LSB block size in bytes */

    TI_DBG1(("satReadCapacity10: Nondefault word118 %d 0x%x \n", word118, word118));
    TI_DBG1(("satReadCapacity10: Nondefault word117 %d 0x%x \n", word117, word117));
    TI_DBG1(("satReadCapacity10: Nondefault Block Length is %d 0x%x \n",word117_118, word117_118));
  }

  /* fill in MAX LBA, which is used in satSendDiagnostic_1() */
  pSatDevData->satMaxLBA[0] = 0;            /* MSB */
  pSatDevData->satMaxLBA[1] = 0;
  pSatDevData->satMaxLBA[2] = 0;
  pSatDevData->satMaxLBA[3] = 0;
  pSatDevData->satMaxLBA[4] = pVirtAddr[0];
  pSatDevData->satMaxLBA[5] = pVirtAddr[1];
  pSatDevData->satMaxLBA[6] = pVirtAddr[2];
  pSatDevData->satMaxLBA[7] = pVirtAddr[3]; /* LSB */

  TI_DBG4(("satReadCapacity10 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x , tiDeviceHandle=%p tiIORequest=%p\n",
        pVirtAddr[0], pVirtAddr[1], pVirtAddr[2], pVirtAddr[3],
        pVirtAddr[4], pVirtAddr[5], pVirtAddr[6], pVirtAddr[7],
        tiDeviceHandle, tiIORequest));

  /*
   * Send the completion response now.
   */
  ostiInitiatorIOCompleted( tiRoot,
                            tiIORequest,
                            tiIOSuccess,
                            SCSI_STAT_GOOD,
                            agNULL,
                            satIOContext->interruptContext);
  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReadCapacity16.
 *
 *  SAT implementation for SCSI satReadCapacity16.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satReadCapacity16(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  bit8                      *pVirtAddr;       /* READ CAPACITY(16) response buffer */
  satDeviceData_t           *pSatDevData;
  agsaSATAIdentifyData_t    *pSATAIdData;     /* cached ATA IDENTIFY DEVICE data */
  bit32                     lastLbaLo;
  bit32                     allocationLen;
  bit32                     readCapacityLen  = 32;  /* fixed parameter-data size per SBC */
  bit32                     i = 0;

  TI_DBG5(("satReadCapacity16 start: tiDeviceHandle=%p tiIORequest=%p\n",
    tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  pVirtAddr   = (bit8 *) tiScsiRequest->sglVirtualAddr;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pSatDevData = satIOContext->pSatDevData;
  pSATAIdData = &pSatDevData->satIdentifyData;

  /* Find the buffer size allocated by Initiator */
  allocationLen = (((bit32)scsiCmnd->cdb[10]) << 24) |
                  (((bit32)scsiCmnd->cdb[11]) << 16) |
                  (((bit32)scsiCmnd->cdb[12]) << 8 ) |
                  (((bit32)scsiCmnd->cdb[13])      );

  /* Buffer smaller than the 32-byte parameter data is rejected outright. */
  if (allocationLen < readCapacityLen)
  {
    TI_DBG1(("satReadCapacity16 *** ERROR *** insufficient len=0x%x readCapacityLen=0x%x\n", allocationLen, readCapacityLen));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[15] & SCSI_NACA_MASK) || (scsiCmnd->cdb[15] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satReadCapacity16: return control\n"));
    return tiSuccess;
  }

  /*
   * If Logical blcok address is not set to zero, return error
   */
  if ((scsiCmnd->cdb[2] || scsiCmnd->cdb[3] || scsiCmnd->cdb[4] || scsiCmnd->cdb[5]) ||
      (scsiCmnd->cdb[6] || scsiCmnd->cdb[7] || scsiCmnd->cdb[8] || scsiCmnd->cdb[9])  )
  {
    TI_DBG1(("satReadCapacity16 *** ERROR *** logical address non zero, tiDeviceHandle=%p tiIORequest=%p\n",
        tiDeviceHandle, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /*
   * If PMI bit is not zero, return error
   */
  if ( ((scsiCmnd->cdb[14]) & SCSI_READ_CAPACITY16_PMI_MASK) != 0 )
  {
    TI_DBG1(("satReadCapacity16 *** ERROR *** PMI is not zero, tiDeviceHandle=%p tiIORequest=%p\n",
        tiDeviceHandle, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /*
    filling in Read Capacity parameter data
  */

  /*
   * If 48-bit addressing is supported, set capacity information from Identify
   * Device Word 100-103.
   */
  /* Bytes 0-7 carry the 64-bit RETURNED LOGICAL BLOCK ADDRESS, big-endian. */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    pVirtAddr[0] = (bit8)(((pSATAIdData->maxLBA48_63) >> 8) & 0xff);  /* MSB */
    pVirtAddr[1] = (bit8)((pSATAIdData->maxLBA48_63)        & 0xff);
    pVirtAddr[2] = (bit8)(((pSATAIdData->maxLBA32_47) >> 8) & 0xff);
    pVirtAddr[3] = (bit8)((pSATAIdData->maxLBA32_47)        & 0xff);

    lastLbaLo = (((pSATAIdData->maxLBA16_31) << 16) ) | (pSATAIdData->maxLBA0_15);
    lastLbaLo = lastLbaLo - 1;      /* LBA starts from zero */

    pVirtAddr[4] = (bit8)((lastLbaLo >> 24) & 0xFF);
    pVirtAddr[5] = (bit8)((lastLbaLo >> 16) & 0xFF);
    pVirtAddr[6] = (bit8)((lastLbaLo >> 8)  & 0xFF);
    pVirtAddr[7] = (bit8)((lastLbaLo )      & 0xFF);    /* LSB */
  }
  /*
   * For 28-bit addressing, set capacity information from Identify
   * Device Word 60-61.
   */
  else
  {
    pVirtAddr[0] = 0;       /* MSB */
    pVirtAddr[1] = 0;
    pVirtAddr[2] = 0;
    pVirtAddr[3] = 0;

    lastLbaLo = (((pSATAIdData->numOfUserAddressableSectorsHi) << 16) ) |
                  (pSATAIdData->numOfUserAddressableSectorsLo);
    lastLbaLo = lastLbaLo - 1;      /* LBA starts from zero */

    pVirtAddr[4] = (bit8)((lastLbaLo >> 24) & 0xFF);
    pVirtAddr[5] = (bit8)((lastLbaLo >> 16) & 0xFF);
    pVirtAddr[6] = (bit8)((lastLbaLo >> 8)  & 0xFF);
    pVirtAddr[7] = (bit8)((lastLbaLo )      & 0xFF);    /* LSB */
  }

  /*
   * Set the block size, fixed at 512 bytes.
   */
  pVirtAddr[8]  = 0x00;        /* MSB block size in bytes */
  pVirtAddr[9]  = 0x00;
  pVirtAddr[10] = 0x02;
  pVirtAddr[11] = 0x00;        /* LSB block size in bytes */

  /* fill in MAX LBA, which is used in satSendDiagnostic_1() */
  pSatDevData->satMaxLBA[0] = pVirtAddr[0];         /* MSB */
  pSatDevData->satMaxLBA[1] = pVirtAddr[1];
  pSatDevData->satMaxLBA[2] = pVirtAddr[2];
  pSatDevData->satMaxLBA[3] = pVirtAddr[3];
  pSatDevData->satMaxLBA[4] = pVirtAddr[4];
  pSatDevData->satMaxLBA[5] = pVirtAddr[5];
  pSatDevData->satMaxLBA[6] = pVirtAddr[6];
  pSatDevData->satMaxLBA[7] = pVirtAddr[7];         /* LSB */

  TI_DBG5(("satReadCapacity16 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x , tiDeviceHandle=%p tiIORequest=%p\n",
        pVirtAddr[0], pVirtAddr[1], pVirtAddr[2], pVirtAddr[3],
        pVirtAddr[4], pVirtAddr[5], pVirtAddr[6], pVirtAddr[7],
        pVirtAddr[8], pVirtAddr[9], pVirtAddr[10], pVirtAddr[11],
        tiDeviceHandle, tiIORequest));

  /* Remaining (reserved) bytes of the 32-byte parameter data are zeroed. */
  for(i=12;i<=31;i++)
  {
    pVirtAddr[i] = 0x00;
  }

  /*
   * Send the completion response now.
   */
  if (allocationLen > readCapacityLen)
  {
    /* underrun */
    TI_DBG1(("satReadCapacity16 reporting underrun readCapacityLen=0x%x allocationLen=0x%x \n", readCapacityLen, allocationLen));

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOUnderRun,
                              allocationLen - readCapacityLen,
                              agNULL,
                              satIOContext->interruptContext );


  }
  else
  {
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
  }
  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI MODE SENSE (6).
 *
 *  SAT implementation for SCSI MODE SENSE (6).
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satModeSense6(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  scsiRspSense_t            *pSense;
  bit32                     requestLen;      /* allocation length from CDB byte 4 */
  tiIniScsiCmnd_t           *scsiCmnd;
  bit32                     pageSupported;
  bit8                      page;
  bit8                      *pModeSense;    /* Mode Sense data buffer */
  satDeviceData_t           *pSatDevData;
  bit8                      PC;
  /* Scratch buffers, one per supported mode page (4-byte header +
   * 8-byte block descriptor + page bytes); copied out with osti_memcpy. */
  bit8                      AllPages[MODE_SENSE6_RETURN_ALL_PAGES_LEN];
  bit8                      Control[MODE_SENSE6_CONTROL_PAGE_LEN];
  bit8                      RWErrorRecovery[MODE_SENSE6_READ_WRITE_ERROR_RECOVERY_PAGE_LEN];
  bit8                      Caching[MODE_SENSE6_CACHING_LEN];
  bit8                      InfoExceptionCtrl[MODE_SENSE6_INFORMATION_EXCEPTION_CONTROL_PAGE_LEN];
  bit8                      lenRead = 0;

  TI_DBG5(("satModeSense6 entry: tiDeviceHandle=%p tiIORequest=%p\n",
    tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pModeSense  = (bit8 *) tiScsiRequest->sglVirtualAddr;
  pSatDevData = satIOContext->pSatDevData;
  //tdhexdump("satModeSense6", (bit8 *)scsiCmnd->cdb, 6);

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satModeSense6: return control\n"));
    return tiSuccess;
  }

  /* checking PC(Page Control)
     SAT revion 8, 8.5.3 p33 and 10.1.2, p66
  */
  /* Only PC == 00b (current values) is supported; saved/changeable/default
   * page control values are rejected. */
  PC = (bit8)((scsiCmnd->cdb[2]) & SCSI_MODE_SENSE6_PC_MASK);
  if (PC != 0)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satModeSense6: return due to PC value pc 0x%x\n", PC >> 6));
    return tiSuccess;
  }

  /* reading PAGE CODE */
  page = (bit8)((scsiCmnd->cdb[2]) & SCSI_MODE_SENSE6_PAGE_CODE_MASK);

  TI_DBG5(("satModeSense6: page=0x%x, tiDeviceHandle=%p tiIORequest=%p\n",
    page, tiDeviceHandle, tiIORequest));

  requestLen = scsiCmnd->cdb[4];

  /*
    Based on page code value, returns a corresponding mode page
    note: no support for subpage
  */
  switch(page)
  {
    case MODESENSE_RETURN_ALL_PAGES:
    case MODESENSE_CONTROL_PAGE: /* control */
    case MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE: /* Read-Write Error Recovery */
    case MODESENSE_CACHING: /* caching */
    case MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE: /* informational exceptions control*/
      pageSupported = agTRUE;
      break;
    case MODESENSE_VENDOR_SPECIFIC_PAGE: /* vendor specific */
    default:
      pageSupported = agFALSE;
      break;
  }

  if (pageSupported == agFALSE)
  {
    TI_DBG1(("satModeSense6 *** ERROR *** not supported page 0x%x tiDeviceHandle=%p tiIORequest=%p\n",
        page, tiDeviceHandle, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /* lenRead = min(allocation length, size of the page being returned) —
   * used both to bound the copy and as the MODE DATA LENGTH basis. */
  switch(page)
  {
    case MODESENSE_RETURN_ALL_PAGES:
      lenRead = (bit8)MIN(requestLen, MODE_SENSE6_RETURN_ALL_PAGES_LEN);
      break;
    case MODESENSE_CONTROL_PAGE: /* control */
      lenRead = (bit8)MIN(requestLen, MODE_SENSE6_CONTROL_PAGE_LEN);
      break;
    case MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE: /* Read-Write Error Recovery */
      lenRead = (bit8)MIN(requestLen, MODE_SENSE6_READ_WRITE_ERROR_RECOVERY_PAGE_LEN);
      break;
    case MODESENSE_CACHING: /* caching */
      lenRead = (bit8)MIN(requestLen, MODE_SENSE6_CACHING_LEN);
      break;
    case MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE: /* informational exceptions control*/
      lenRead = (bit8)MIN(requestLen, MODE_SENSE6_INFORMATION_EXCEPTION_CONTROL_PAGE_LEN);
      break;
    default:
      TI_DBG1(("satModeSense6: default error page %d\n", page));
      break;
  }

  if (page == MODESENSE_RETURN_ALL_PAGES)
  {
    TI_DBG5(("satModeSense6: MODESENSE_RETURN_ALL_PAGES\n"));
    AllPages[0] = (bit8)(lenRead - 1);
    AllPages[1] = 0x00; /* default medium type (currently mounted medium type) */
    AllPages[2] = 0x00; /* no write-protect, no support for DPO-FUA */
    AllPages[3] = 0x08; /* block descriptor length */

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */

    /* density code */
    AllPages[4]  = 0x04; /* density-code : reserved for direct-access */
    /* number of blocks */
    AllPages[5]  = 0x00; /* unspecified */
    AllPages[6]  = 0x00; /* unspecified */
    AllPages[7]  = 0x00; /* unspecified */
    /* reserved */
    AllPages[8]  = 0x00; /* reserved */
    /* Block size */
    AllPages[9]  = 0x00;
    AllPages[10] = 0x02;   /* Block size is always 512 bytes */
    AllPages[11] = 0x00;

    /* MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE */
    AllPages[12] = 0x01; /* page code */
    AllPages[13] = 0x0A; /* page length */
    AllPages[14] = 0x40; /* ARRE is set */
    AllPages[15] = 0x00;
    AllPages[16] = 0x00;
    AllPages[17] = 0x00;
    AllPages[18] = 0x00;
    AllPages[19] = 0x00;
    AllPages[20] = 0x00;
    AllPages[21] = 0x00;
    AllPages[22] = 0x00;
    AllPages[23] = 0x00;

    /* MODESENSE_CACHING */
    AllPages[24] = 0x08; /* page code */
    AllPages[25] = 0x12; /* page length */
#ifdef NOT_YET
    if (pSatDevData->satWriteCacheEnabled == agTRUE)
    {
      AllPages[26] = 0x04;/* WCE bit is set */
    }
    else
    {
      AllPages[26] = 0x00;/* WCE bit is NOT set */
    }
#endif
    /* NOTE(review): the #ifdef NOT_YET branch above is dead; WCE is
     * unconditionally reported clear here. */
    AllPages[26] = 0x00;/* WCE bit is NOT set */
    AllPages[27] = 0x00;
    AllPages[28] = 0x00;
    AllPages[29] = 0x00;
    AllPages[30] = 0x00;
    AllPages[31] = 0x00;
    AllPages[32] = 0x00;
    AllPages[33] = 0x00;
    AllPages[34] = 0x00;
    AllPages[35] = 0x00;
    if (pSatDevData->satLookAheadEnabled == agTRUE)
    {
      AllPages[36] = 0x00;/* DRA bit is NOT set */
    }
    else
    {
      AllPages[36] = 0x20;/* DRA bit is set */
    }
    AllPages[37] = 0x00;
    AllPages[38] = 0x00;
    AllPages[39] = 0x00;
    AllPages[40] = 0x00;
    AllPages[41] = 0x00;
    AllPages[42] = 0x00;
    AllPages[43] = 0x00;

    /* MODESENSE_CONTROL_PAGE */
    AllPages[44] = 0x0A; /* page code */
    AllPages[45] = 0x0A; /* page length */
    AllPages[46] = 0x02; /* only GLTSD bit is set */
    if (pSatDevData->satNCQ == agTRUE)
    {
      AllPages[47] = 0x12; /* Queue Alogorithm modifier 1b and QErr 01b*/
    }
    else
    {
      AllPages[47] = 0x02; /* Queue Alogorithm modifier 0b and QErr 01b */
    }
    AllPages[48] = 0x00;
    AllPages[49] = 0x00;
    AllPages[50] = 0x00; /* obsolete */
    AllPages[51] = 0x00; /* obsolete */
    AllPages[52] = 0xFF; /* Busy Timeout Period */
    AllPages[53] = 0xFF; /* Busy Timeout Period */
    AllPages[54] = 0x00; /* we don't support non-000b value for the self-test code */
    AllPages[55] = 0x00; /* we don't support non-000b value for the self-test code */

    /* MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE */
    AllPages[56] = 0x1C; /* page code */
    AllPages[57] = 0x0A; /* page length */
    if (pSatDevData->satSMARTEnabled == agTRUE)
    {
      AllPages[58] = 0x00;/* DEXCPT bit is NOT set */
    }
    else
    {
      AllPages[58] = 0x08;/* DEXCPT bit is set */
    }
    AllPages[59] = 0x00; /* We don't support MRIE */
    AllPages[60] = 0x00; /* Interval timer vendor-specific */
    AllPages[61] = 0x00;
    AllPages[62] = 0x00;
    AllPages[63] = 0x00;
    AllPages[64] = 0x00; /* REPORT-COUNT */
    AllPages[65] = 0x00;
    AllPages[66] = 0x00;
    AllPages[67] = 0x00;

    osti_memcpy(pModeSense, &AllPages, lenRead);
  }
  else if (page == MODESENSE_CONTROL_PAGE)
  {
    TI_DBG5(("satModeSense6: MODESENSE_CONTROL_PAGE\n"));
    Control[0] = MODE_SENSE6_CONTROL_PAGE_LEN - 1;
    Control[1] = 0x00; /* default medium type (currently mounted medium type) */
    Control[2] = 0x00; /* no write-protect, no support for DPO-FUA */
    Control[3] = 0x08; /* block descriptor length */

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */

    /* density code */
    Control[4]  = 0x04; /* density-code : reserved for direct-access */
    /* number of blocks */
    Control[5]  = 0x00; /* unspecified */
    Control[6]  = 0x00; /* unspecified */
    Control[7]  = 0x00; /* unspecified */
    /* reserved */
    Control[8]  = 0x00; /* reserved */
    /* Block size */
    Control[9]  = 0x00;
    Control[10] = 0x02;   /* Block size is always 512 bytes */
    Control[11] = 0x00;

    /*
     * Fill-up control mode page, SAT, Table 65
     */
    Control[12] = 0x0A; /* page code */
    Control[13] = 0x0A; /* page length */
    Control[14] = 0x02; /* only GLTSD bit is set */
    if (pSatDevData->satNCQ == agTRUE)
    {
      Control[15] = 0x12; /* Queue Alogorithm modifier 1b and QErr 01b*/
    }
    else
    {
      Control[15] = 0x02; /* Queue Alogorithm modifier 0b and QErr 01b */
    }
    Control[16] = 0x00;
    Control[17] = 0x00;
    Control[18] = 0x00; /* obsolete */
    Control[19] = 0x00; /* obsolete */
    Control[20] = 0xFF; /* Busy Timeout Period */
    Control[21] = 0xFF; /* Busy Timeout Period */
    Control[22] = 0x00; /* we don't support non-000b value for the self-test code */
    Control[23] = 0x00; /* we don't support non-000b value for the self-test code */

    osti_memcpy(pModeSense, &Control, lenRead);
  }
  else if (page == MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE)
  {
    TI_DBG5(("satModeSense6: MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE\n"));
    RWErrorRecovery[0] = MODE_SENSE6_READ_WRITE_ERROR_RECOVERY_PAGE_LEN - 1;
    RWErrorRecovery[1] = 0x00; /* default medium type (currently mounted medium type) */
    RWErrorRecovery[2] = 0x00; /* no write-protect, no support for DPO-FUA */
    RWErrorRecovery[3] = 0x08; /* block descriptor length */

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */

    /* density code */
    RWErrorRecovery[4]  = 0x04; /* density-code : reserved for direct-access */
    /* number of blocks */
    RWErrorRecovery[5]  = 0x00; /* unspecified */
    RWErrorRecovery[6]  = 0x00; /* unspecified */
    RWErrorRecovery[7]  = 0x00; /* unspecified */
    /* reserved */
    RWErrorRecovery[8]  = 0x00; /* reserved */
    /* Block size */
    RWErrorRecovery[9]  = 0x00;
    RWErrorRecovery[10] = 0x02;   /* Block size is always 512 bytes */
    RWErrorRecovery[11] = 0x00;

    /*
     * Fill-up Read-Write Error Recovery mode page, SAT, Table 66
     */
    RWErrorRecovery[12] = 0x01; /* page code */
    RWErrorRecovery[13] = 0x0A; /* page length */
    RWErrorRecovery[14] = 0x40; /* ARRE is set */
    RWErrorRecovery[15] = 0x00;
    RWErrorRecovery[16] = 0x00;
    RWErrorRecovery[17] = 0x00;
    RWErrorRecovery[18] = 0x00;
    RWErrorRecovery[19] = 0x00;
    RWErrorRecovery[20] = 0x00;
    RWErrorRecovery[21] = 0x00;
    RWErrorRecovery[22] = 0x00;
    RWErrorRecovery[23] = 0x00;

    osti_memcpy(pModeSense, &RWErrorRecovery, lenRead);
  }
  else if (page == MODESENSE_CACHING)
  {
    TI_DBG5(("satModeSense6: MODESENSE_CACHING\n"));
    /* special case: a 4-byte request returns only the mode parameter header
     * (works around old Linux initiators issuing MODE SENSE with len 4). */
    if (requestLen == 4 && page == MODESENSE_CACHING)
    {
      TI_DBG5(("satModeSense6: linux 2.6.8.24 support\n"));

      pModeSense[0] = 0x20 - 1; /* 32 - 1 */
      pModeSense[1] = 0x00; /* default medium type (currently mounted medium type) */
      pModeSense[2] = 0x00; /* no write-protect, no support for DPO-FUA */
      pModeSense[3] = 0x08; /* block descriptor length */
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
      return tiSuccess;
    }
    Caching[0] = MODE_SENSE6_CACHING_LEN - 1;
    Caching[1] = 0x00; /* default medium type (currently mounted medium type) */
    Caching[2] = 0x00; /* no write-protect, no support for DPO-FUA */
    Caching[3] = 0x08; /* block descriptor length */

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */

    /* density code */
    Caching[4]  = 0x04; /* density-code : reserved for direct-access */
    /* number of blocks */
    Caching[5]  = 0x00; /* unspecified */
    Caching[6]  = 0x00; /* unspecified */
    Caching[7]  = 0x00; /* unspecified */
    /* reserved */
    Caching[8]  = 0x00; /* reserved */
    /* Block size */
    Caching[9]  = 0x00;
    Caching[10] = 0x02;   /* Block size is always 512 bytes */
    Caching[11] = 0x00;

    /*
     * Fill-up Caching mode page, SAT, Table 67
     */
    /* length 20 */
    Caching[12] = 0x08; /* page code */
    Caching[13] = 0x12; /* page length */
#ifdef NOT_YET
    if (pSatDevData->satWriteCacheEnabled == agTRUE)
    {
      Caching[14] = 0x04;/* WCE bit is set */
    }
    else
    {
      Caching[14] = 0x00;/* WCE bit is NOT set */
    }
#endif
    /* NOTE(review): the #ifdef NOT_YET branch above is dead; WCE is
     * unconditionally reported clear here. */
    Caching[14] = 0x00;/* WCE bit is NOT set */
    Caching[15] = 0x00;
    Caching[16] = 0x00;
    Caching[17] = 0x00;
    Caching[18] = 0x00;
    Caching[19] = 0x00;
    Caching[20] = 0x00;
    Caching[21] = 0x00;
    Caching[22] = 0x00;
    Caching[23] = 0x00;
    if (pSatDevData->satLookAheadEnabled == agTRUE)
    {
      Caching[24] = 0x00;/* DRA bit is NOT set */
    }
    else
    {
      Caching[24] = 0x20;/* DRA bit is set */
    }
    Caching[25] = 0x00;
    Caching[26] = 0x00;
    Caching[27] = 0x00;
    Caching[28] = 0x00;
    Caching[29] = 0x00;
    Caching[30] = 0x00;
    Caching[31] = 0x00;

    osti_memcpy(pModeSense, &Caching, lenRead);
  }
  else if (page == MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE)
  {
    TI_DBG5(("satModeSense6: MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE\n"));
    InfoExceptionCtrl[0] = MODE_SENSE6_INFORMATION_EXCEPTION_CONTROL_PAGE_LEN - 1;
    InfoExceptionCtrl[1] = 0x00; /* default medium type (currently mounted medium type) */
    InfoExceptionCtrl[2] = 0x00; /* no write-protect, no support for DPO-FUA */
    InfoExceptionCtrl[3] = 0x08; /* block descriptor length */

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */

    /* density code */
    InfoExceptionCtrl[4]  = 0x04; /* density-code : reserved for direct-access */
    /* number of blocks */
    InfoExceptionCtrl[5]  = 0x00; /* unspecified */
    InfoExceptionCtrl[6]  = 0x00; /* unspecified */
    InfoExceptionCtrl[7]  = 0x00; /* unspecified */
    /* reserved */
    InfoExceptionCtrl[8]  = 0x00; /* reserved */
    /* Block size */
    InfoExceptionCtrl[9]  = 0x00;
    InfoExceptionCtrl[10] = 0x02;   /* Block size is always 512 bytes */
    InfoExceptionCtrl[11] = 0x00;

    /*
     * Fill-up informational-exceptions control mode page, SAT, Table 68
     */
    InfoExceptionCtrl[12] = 0x1C; /* page code */
    InfoExceptionCtrl[13] = 0x0A; /* page length */
    if (pSatDevData->satSMARTEnabled == agTRUE)
    {
      InfoExceptionCtrl[14] = 0x00;/* DEXCPT bit is NOT set */
    }
    else
    {
      InfoExceptionCtrl[14] = 0x08;/* DEXCPT bit is set */
    }
    InfoExceptionCtrl[15] = 0x00; /* We don't support MRIE */
    InfoExceptionCtrl[16] = 0x00; /* Interval timer vendor-specific */
    InfoExceptionCtrl[17] = 0x00;
    InfoExceptionCtrl[18] = 0x00;
    InfoExceptionCtrl[19] = 0x00;
    InfoExceptionCtrl[20] = 0x00; /* REPORT-COUNT */
    InfoExceptionCtrl[21] = 0x00;
    InfoExceptionCtrl[22] = 0x00;
    InfoExceptionCtrl[23] = 0x00;

    osti_memcpy(pModeSense, &InfoExceptionCtrl, lenRead);
  }
  else
  {
    /* Error */
    TI_DBG1(("satModeSense6: Error page %d\n", page));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /* there can be only underrun not overrun in error case */
  if (requestLen > lenRead)
  {
    TI_DBG6(("satModeSense6 reporting underrun lenRead=0x%x requestLen=0x%x tiIORequest=%p\n", lenRead, requestLen, tiIORequest));

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOUnderRun,
                              requestLen - lenRead,
                              agNULL,
                              satIOContext->interruptContext );


  }
  else
  {
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
  }

  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI MODE SENSE (10).
 *
 *  SAT implementation for SCSI MODE SENSE (10).
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satModeSense10(
                            tiRoot_t                  *tiRoot,
                            tiIORequest_t             *tiIORequest,
                            tiDeviceHandle_t          *tiDeviceHandle,
                            tiScsiInitiatorRequest_t  *tiScsiRequest,
                            satIOContext_t            *satIOContext)
{
  scsiRspSense_t          *pSense;
  bit32                   requestLen;
  tiIniScsiCmnd_t         *scsiCmnd;
  bit32                   pageSupported;
  bit8                    page;
  bit8                    *pModeSense;    /* Mode Sense data buffer */
  satDeviceData_t         *pSatDevData;
  bit8                    PC;             /* page control */
  bit8                    LLBAA;          /* Long LBA Accepted */
  bit32                   index;
  /*
   * One scratch buffer per supported page, each sized for the largest
   * (LLBAA) variant: 8-byte MODE SENSE(10) header + 16-byte long-LBA
   * block descriptor + the mode page itself.
   */
  bit8                    AllPages[MODE_SENSE10_RETURN_ALL_PAGES_LLBAA_LEN];
  bit8                    Control[MODE_SENSE10_CONTROL_PAGE_LLBAA_LEN];
  bit8                    RWErrorRecovery[MODE_SENSE10_READ_WRITE_ERROR_RECOVERY_PAGE_LLBAA_LEN];
  bit8                    Caching[MODE_SENSE10_CACHING_LLBAA_LEN];
  bit8                    InfoExceptionCtrl[MODE_SENSE10_INFORMATION_EXCEPTION_CONTROL_PAGE_LLBAA_LEN];
  bit8                    lenRead = 0;

  TI_DBG5(("satModeSense10 entry: tiDeviceHandle=%p tiIORequest=%p\n",
    tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pModeSense  = (bit8 *) tiScsiRequest->sglVirtualAddr;
  pSatDevData = satIOContext->pSatDevData;

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 : neither is supported, reject with ILLEGAL REQUEST */
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satModeSense10: return control\n"));
    return tiSuccess;
  }

  /* checking PC(Page Control) SAT revision 8, 8.5.3 p33 and 10.1.2, p66 */
  /* only PC == 00b (current values) is supported */
  PC = (bit8)((scsiCmnd->cdb[2]) & SCSI_MODE_SENSE10_PC_MASK);
  if (PC != 0)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satModeSense10: return due to PC value pc 0x%x\n", PC));
    return tiSuccess;
  }

  /* finding LLBAA bit; selects 16-byte (long-LBA) vs 8-byte block descriptor */
  LLBAA = (bit8)((scsiCmnd->cdb[1]) & SCSI_MODE_SENSE10_LLBAA_MASK);

  /* reading PAGE CODE */
  page = (bit8)((scsiCmnd->cdb[2]) & SCSI_MODE_SENSE10_PAGE_CODE_MASK);

  TI_DBG5(("satModeSense10: page=0x%x, tiDeviceHandle=%p tiIORequest=%p\n",
    page, tiDeviceHandle, tiIORequest));

  /* ALLOCATION LENGTH, CDB bytes 7-8 (big-endian) */
  requestLen = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  /*
    Based on page code value, returns a corresponding mode page
    note: no support for subpage
  */
  switch(page)
  {
    case MODESENSE_RETURN_ALL_PAGES: /* return all pages */
    case MODESENSE_CONTROL_PAGE: /* control */
    case MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE: /* Read-Write Error Recovery */
    case MODESENSE_CACHING: /* caching */
    case MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE: /* informational exceptions control */
      pageSupported = agTRUE;
      break;
    case MODESENSE_VENDOR_SPECIFIC_PAGE: /* vendor specific */
    default:
      pageSupported = agFALSE;
      break;
  }

  if (pageSupported == agFALSE)
  {
    TI_DBG1(("satModeSense10 *** ERROR *** not supported page 0x%x tiDeviceHandle=%p tiIORequest=%p\n",
      page, tiDeviceHandle, tiIORequest));

    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /* lenRead = min(allocation length, size of the selected page variant) */
  switch(page)
  {
  case MODESENSE_RETURN_ALL_PAGES:
    if (LLBAA)
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_RETURN_ALL_PAGES_LLBAA_LEN);
    }
    else
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_RETURN_ALL_PAGES_LEN);
    }
    break;
  case MODESENSE_CONTROL_PAGE: /* control */
    if (LLBAA)
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_CONTROL_PAGE_LLBAA_LEN);
    }
    else
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_CONTROL_PAGE_LEN);
    }
    break;
  case MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE: /* Read-Write Error Recovery */
    if (LLBAA)
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_READ_WRITE_ERROR_RECOVERY_PAGE_LLBAA_LEN);
    }
    else
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_READ_WRITE_ERROR_RECOVERY_PAGE_LEN);
    }
    break;
  case MODESENSE_CACHING: /* caching */
    if (LLBAA)
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_CACHING_LLBAA_LEN);
    }
    else
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_CACHING_LEN);
    }
    break;
  case MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE: /* informational exceptions control */
    if (LLBAA)
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_INFORMATION_EXCEPTION_CONTROL_PAGE_LLBAA_LEN);
    }
    else
    {
      lenRead = (bit8)MIN(requestLen, MODE_SENSE10_INFORMATION_EXCEPTION_CONTROL_PAGE_LEN);
    }
    break;
  default:
    TI_DBG1(("satModeSense10: default error page %d\n", page));
    break;
  }

  if (page == MODESENSE_RETURN_ALL_PAGES)
  {
    TI_DBG5(("satModeSense10: MODESENSE_RETURN_ALL_PAGES\n"));
    /* MODE SENSE(10) header, 8 bytes */
    AllPages[0] = 0;
    AllPages[1] = (bit8)(lenRead - 2);
    AllPages[2] = 0x00; /* medium type: default medium type (currently mounted medium type) */
    AllPages[3] = 0x00; /* device-specific param: no write-protect, no support for DPO-FUA */
    if (LLBAA)
    {
      AllPages[4] = 0x00; /* reserved and LONGLBA */
      AllPages[4] = (bit8)(AllPages[4] | 0x1); /* LONGLBA is set */
    }
    else
    {
      AllPages[4] = 0x00; /* reserved and LONGLBA: LONGLBA is not set */
    }
    AllPages[5] = 0x00; /* reserved */
    AllPages[6] = 0x00; /* block descriptor length */
    if (LLBAA)
    {
      AllPages[7] = 0x10; /* block descriptor length: LONGLBA is set. So, length is 16 */
    }
    else
    {
      AllPages[7] = 0x08; /* block descriptor length: LONGLBA is NOT set. So, length is 8 */
    }

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */
    if (LLBAA)
    {
      /* density code */
      AllPages[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      AllPages[9]  = 0x00; /* unspecified */
      AllPages[10] = 0x00; /* unspecified */
      AllPages[11] = 0x00; /* unspecified */
      AllPages[12] = 0x00; /* unspecified */
      AllPages[13] = 0x00; /* unspecified */
      AllPages[14] = 0x00; /* unspecified */
      AllPages[15] = 0x00; /* unspecified */
      /* reserved */
      AllPages[16] = 0x00; /* reserved */
      AllPages[17] = 0x00; /* reserved */
      AllPages[18] = 0x00; /* reserved */
      AllPages[19] = 0x00; /* reserved */
      /* Block size */
      AllPages[20] = 0x00;
      AllPages[21] = 0x00;
      AllPages[22] = 0x02; /* Block size is always 512 bytes */
      AllPages[23] = 0x00;
    }
    else
    {
      /* density code */
      AllPages[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      AllPages[9]  = 0x00; /* unspecified */
      AllPages[10] = 0x00; /* unspecified */
      AllPages[11] = 0x00; /* unspecified */
      /* reserved */
      AllPages[12] = 0x00; /* reserved */
      /* Block size */
      AllPages[13] = 0x00;
      AllPages[14] = 0x02; /* Block size is always 512 bytes */
      AllPages[15] = 0x00;
    }

    /* index = offset of the first mode page (header + block descriptor) */
    if (LLBAA)
    {
      index = 24;
    }
    else
    {
      index = 16;
    }

    /* MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE */
    AllPages[index+0]  = 0x01; /* page code */
    AllPages[index+1]  = 0x0A; /* page length */
    AllPages[index+2]  = 0x40; /* ARRE is set */
    AllPages[index+3]  = 0x00;
    AllPages[index+4]  = 0x00;
    AllPages[index+5]  = 0x00;
    AllPages[index+6]  = 0x00;
    AllPages[index+7]  = 0x00;
    AllPages[index+8]  = 0x00;
    AllPages[index+9]  = 0x00;
    AllPages[index+10] = 0x00;
    AllPages[index+11] = 0x00;

    /* MODESENSE_CACHING */
    /*
     * Fill-up Caching mode page, SAT, Table 67
     */
    /* length 20 */
    AllPages[index+12] = 0x08; /* page code */
    AllPages[index+13] = 0x12; /* page length */
#ifdef NOT_YET
    if (pSatDevData->satWriteCacheEnabled == agTRUE)
    {
      AllPages[index+14] = 0x04;/* WCE bit is set */
    }
    else
    {
      AllPages[index+14] = 0x00;/* WCE bit is NOT set */
    }
#endif
    AllPages[index+14] = 0x00;/* WCE bit is NOT set */
    AllPages[index+15] = 0x00;
    AllPages[index+16] = 0x00;
    AllPages[index+17] = 0x00;
    AllPages[index+18] = 0x00;
    AllPages[index+19] = 0x00;
    AllPages[index+20] = 0x00;
    AllPages[index+21] = 0x00;
    AllPages[index+22] = 0x00;
    AllPages[index+23] = 0x00;
    if (pSatDevData->satLookAheadEnabled == agTRUE)
    {
      AllPages[index+24] = 0x00;/* DRA bit is NOT set */
    }
    else
    {
      AllPages[index+24] = 0x20;/* DRA bit is set */
    }
    AllPages[index+25] = 0x00;
    AllPages[index+26] = 0x00;
    AllPages[index+27] = 0x00;
    AllPages[index+28] = 0x00;
    AllPages[index+29] = 0x00;
    AllPages[index+30] = 0x00;
    AllPages[index+31] = 0x00;

    /* MODESENSE_CONTROL_PAGE */
    /*
     * Fill-up control mode page, SAT, Table 65
     */
    AllPages[index+32] = 0x0A; /* page code */
    AllPages[index+33] = 0x0A; /* page length */
    AllPages[index+34] = 0x02; /* only GLTSD bit is set */
    if (pSatDevData->satNCQ == agTRUE)
    {
      AllPages[index+35] = 0x12; /* Queue Algorithm modifier 1b and QErr 01b */
    }
    else
    {
      AllPages[index+35] = 0x02; /* Queue Algorithm modifier 0b and QErr 01b */
    }
    AllPages[index+36] = 0x00;
    AllPages[index+37] = 0x00;
    AllPages[index+38] = 0x00; /* obsolete */
    AllPages[index+39] = 0x00; /* obsolete */
    AllPages[index+40] = 0xFF; /* Busy Timeout Period */
    AllPages[index+41] = 0xFF; /* Busy Timeout Period */
    AllPages[index+42] = 0x00; /* we don't support non-000b value for the self-test code */
    AllPages[index+43] = 0x00; /* we don't support non-000b value for the self-test code */

    /* MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE */
    /*
     * Fill-up informational-exceptions control mode page, SAT, Table 68
     */
    AllPages[index+44] = 0x1C; /* page code */
    AllPages[index+45] = 0x0A; /* page length */
    if (pSatDevData->satSMARTEnabled == agTRUE)
    {
      AllPages[index+46] = 0x00;/* DEXCPT bit is NOT set */
    }
    else
    {
      AllPages[index+46] = 0x08;/* DEXCPT bit is set */
    }
    AllPages[index+47] = 0x00; /* We don't support MRIE */
    AllPages[index+48] = 0x00; /* Interval timer vendor-specific */
    AllPages[index+49] = 0x00;
    AllPages[index+50] = 0x00;
    AllPages[index+51] = 0x00;
    AllPages[index+52] = 0x00; /* REPORT-COUNT */
    AllPages[index+53] = 0x00;
    AllPages[index+54] = 0x00;
    AllPages[index+55] = 0x00;

    osti_memcpy(pModeSense, &AllPages, lenRead);
  }
  else if (page == MODESENSE_CONTROL_PAGE)
  {
    TI_DBG5(("satModeSense10: MODESENSE_CONTROL_PAGE\n"));
    /* MODE SENSE(10) header, 8 bytes */
    Control[0] = 0;
    Control[1] = (bit8)(lenRead - 2);
    Control[2] = 0x00; /* medium type: default medium type (currently mounted medium type) */
    Control[3] = 0x00; /* device-specific param: no write-protect, no support for DPO-FUA */
    if (LLBAA)
    {
      Control[4] = 0x00; /* reserved and LONGLBA */
      Control[4] = (bit8)(Control[4] | 0x1); /* LONGLBA is set */
    }
    else
    {
      Control[4] = 0x00; /* reserved and LONGLBA: LONGLBA is not set */
    }
    Control[5] = 0x00; /* reserved */
    Control[6] = 0x00; /* block descriptor length */
    if (LLBAA)
    {
      Control[7] = 0x10; /* block descriptor length: LONGLBA is set. So, length is 16 */
    }
    else
    {
      Control[7] = 0x08; /* block descriptor length: LONGLBA is NOT set. So, length is 8 */
    }

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */
    if (LLBAA)
    {
      /* density code */
      Control[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      Control[9]  = 0x00; /* unspecified */
      Control[10] = 0x00; /* unspecified */
      Control[11] = 0x00; /* unspecified */
      Control[12] = 0x00; /* unspecified */
      Control[13] = 0x00; /* unspecified */
      Control[14] = 0x00; /* unspecified */
      Control[15] = 0x00; /* unspecified */
      /* reserved */
      Control[16] = 0x00; /* reserved */
      Control[17] = 0x00; /* reserved */
      Control[18] = 0x00; /* reserved */
      Control[19] = 0x00; /* reserved */
      /* Block size */
      Control[20] = 0x00;
      Control[21] = 0x00;
      Control[22] = 0x02; /* Block size is always 512 bytes */
      Control[23] = 0x00;
    }
    else
    {
      /* density code */
      Control[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      Control[9]  = 0x00; /* unspecified */
      Control[10] = 0x00; /* unspecified */
      Control[11] = 0x00; /* unspecified */
      /* reserved */
      Control[12] = 0x00; /* reserved */
      /* Block size */
      Control[13] = 0x00;
      Control[14] = 0x02; /* Block size is always 512 bytes */
      Control[15] = 0x00;
    }

    if (LLBAA)
    {
      index = 24;
    }
    else
    {
      index = 16;
    }

    /*
     * Fill-up control mode page, SAT, Table 65
     */
    Control[index+0]  = 0x0A; /* page code */
    Control[index+1]  = 0x0A; /* page length */
    Control[index+2]  = 0x02; /* only GLTSD bit is set */
    if (pSatDevData->satNCQ == agTRUE)
    {
      Control[index+3] = 0x12; /* Queue Algorithm modifier 1b and QErr 01b */
    }
    else
    {
      Control[index+3] = 0x02; /* Queue Algorithm modifier 0b and QErr 01b */
    }
    Control[index+4]  = 0x00;
    Control[index+5]  = 0x00;
    Control[index+6]  = 0x00; /* obsolete */
    Control[index+7]  = 0x00; /* obsolete */
    Control[index+8]  = 0xFF; /* Busy Timeout Period */
    Control[index+9]  = 0xFF; /* Busy Timeout Period */
    Control[index+10] = 0x00; /* we don't support non-000b value for the self-test code */
    Control[index+11] = 0x00; /* we don't support non-000b value for the self-test code */

    osti_memcpy(pModeSense, &Control, lenRead);
  }
  else if (page == MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE)
  {
    TI_DBG5(("satModeSense10: MODESENSE_READ_WRITE_ERROR_RECOVERY_PAGE\n"));
    /* MODE SENSE(10) header, 8 bytes */
    RWErrorRecovery[0] = 0;
    RWErrorRecovery[1] = (bit8)(lenRead - 2);
    RWErrorRecovery[2] = 0x00; /* medium type: default medium type (currently mounted medium type) */
    RWErrorRecovery[3] = 0x00; /* device-specific param: no write-protect, no support for DPO-FUA */
    if (LLBAA)
    {
      RWErrorRecovery[4] = 0x00; /* reserved and LONGLBA */
      RWErrorRecovery[4] = (bit8)(RWErrorRecovery[4] | 0x1); /* LONGLBA is set */
    }
    else
    {
      RWErrorRecovery[4] = 0x00; /* reserved and LONGLBA: LONGLBA is not set */
    }
    RWErrorRecovery[5] = 0x00; /* reserved */
    RWErrorRecovery[6] = 0x00; /* block descriptor length */
    if (LLBAA)
    {
      RWErrorRecovery[7] = 0x10; /* block descriptor length: LONGLBA is set. So, length is 16 */
    }
    else
    {
      RWErrorRecovery[7] = 0x08; /* block descriptor length: LONGLBA is NOT set. So, length is 8 */
    }

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */
    if (LLBAA)
    {
      /* density code */
      RWErrorRecovery[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      RWErrorRecovery[9]  = 0x00; /* unspecified */
      RWErrorRecovery[10] = 0x00; /* unspecified */
      RWErrorRecovery[11] = 0x00; /* unspecified */
      RWErrorRecovery[12] = 0x00; /* unspecified */
      RWErrorRecovery[13] = 0x00; /* unspecified */
      RWErrorRecovery[14] = 0x00; /* unspecified */
      RWErrorRecovery[15] = 0x00; /* unspecified */
      /* reserved */
      RWErrorRecovery[16] = 0x00; /* reserved */
      RWErrorRecovery[17] = 0x00; /* reserved */
      RWErrorRecovery[18] = 0x00; /* reserved */
      RWErrorRecovery[19] = 0x00; /* reserved */
      /* Block size */
      RWErrorRecovery[20] = 0x00;
      RWErrorRecovery[21] = 0x00;
      RWErrorRecovery[22] = 0x02; /* Block size is always 512 bytes */
      RWErrorRecovery[23] = 0x00;
    }
    else
    {
      /* density code */
      RWErrorRecovery[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      RWErrorRecovery[9]  = 0x00; /* unspecified */
      RWErrorRecovery[10] = 0x00; /* unspecified */
      RWErrorRecovery[11] = 0x00; /* unspecified */
      /* reserved */
      RWErrorRecovery[12] = 0x00; /* reserved */
      /* Block size */
      RWErrorRecovery[13] = 0x00;
      RWErrorRecovery[14] = 0x02; /* Block size is always 512 bytes */
      RWErrorRecovery[15] = 0x00;
    }

    if (LLBAA)
    {
      index = 24;
    }
    else
    {
      index = 16;
    }

    /*
     * Fill-up Read-Write Error Recovery mode page, SAT, Table 66
     */
    RWErrorRecovery[index+0]  = 0x01; /* page code */
    RWErrorRecovery[index+1]  = 0x0A; /* page length */
    RWErrorRecovery[index+2]  = 0x40; /* ARRE is set */
    RWErrorRecovery[index+3]  = 0x00;
    RWErrorRecovery[index+4]  = 0x00;
    RWErrorRecovery[index+5]  = 0x00;
    RWErrorRecovery[index+6]  = 0x00;
    RWErrorRecovery[index+7]  = 0x00;
    RWErrorRecovery[index+8]  = 0x00;
    RWErrorRecovery[index+9]  = 0x00;
    RWErrorRecovery[index+10] = 0x00;
    RWErrorRecovery[index+11] = 0x00;

    osti_memcpy(pModeSense, &RWErrorRecovery, lenRead);
  }
  else if (page == MODESENSE_CACHING)
  {
    TI_DBG5(("satModeSense10: MODESENSE_CACHING\n"));
    /* MODE SENSE(10) header, 8 bytes */
    Caching[0] = 0;
    Caching[1] = (bit8)(lenRead - 2);
    Caching[2] = 0x00; /* medium type: default medium type (currently mounted medium type) */
    Caching[3] = 0x00; /* device-specific param: no write-protect, no support for DPO-FUA */
    if (LLBAA)
    {
      Caching[4] = 0x00; /* reserved and LONGLBA */
      Caching[4] = (bit8)(Caching[4] | 0x1); /* LONGLBA is set */
    }
    else
    {
      Caching[4] = 0x00; /* reserved and LONGLBA: LONGLBA is not set */
    }
    Caching[5] = 0x00; /* reserved */
    Caching[6] = 0x00; /* block descriptor length */
    if (LLBAA)
    {
      Caching[7] = 0x10; /* block descriptor length: LONGLBA is set. So, length is 16 */
    }
    else
    {
      Caching[7] = 0x08; /* block descriptor length: LONGLBA is NOT set. So, length is 8 */
    }

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */
    if (LLBAA)
    {
      /* density code */
      Caching[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      Caching[9]  = 0x00; /* unspecified */
      Caching[10] = 0x00; /* unspecified */
      Caching[11] = 0x00; /* unspecified */
      Caching[12] = 0x00; /* unspecified */
      Caching[13] = 0x00; /* unspecified */
      Caching[14] = 0x00; /* unspecified */
      Caching[15] = 0x00; /* unspecified */
      /* reserved */
      Caching[16] = 0x00; /* reserved */
      Caching[17] = 0x00; /* reserved */
      Caching[18] = 0x00; /* reserved */
      Caching[19] = 0x00; /* reserved */
      /* Block size */
      Caching[20] = 0x00;
      Caching[21] = 0x00;
      Caching[22] = 0x02; /* Block size is always 512 bytes */
      Caching[23] = 0x00;
    }
    else
    {
      /* density code */
      Caching[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      Caching[9]  = 0x00; /* unspecified */
      Caching[10] = 0x00; /* unspecified */
      Caching[11] = 0x00; /* unspecified */
      /* reserved */
      Caching[12] = 0x00; /* reserved */
      /* Block size */
      Caching[13] = 0x00;
      Caching[14] = 0x02; /* Block size is always 512 bytes */
      Caching[15] = 0x00;
    }

    if (LLBAA)
    {
      index = 24;
    }
    else
    {
      index = 16;
    }

    /*
     * Fill-up Caching mode page, SAT, Table 67
     */
    /* length 20 */
    Caching[index+0] = 0x08; /* page code */
    Caching[index+1] = 0x12; /* page length */
#ifdef NOT_YET
    if (pSatDevData->satWriteCacheEnabled == agTRUE)
    {
      Caching[index+2] = 0x04;/* WCE bit is set */
    }
    else
    {
      Caching[index+2] = 0x00;/* WCE bit is NOT set */
    }
#endif
    Caching[index+2]  = 0x00;/* WCE bit is NOT set */
    Caching[index+3]  = 0x00;
    Caching[index+4]  = 0x00;
    Caching[index+5]  = 0x00;
    Caching[index+6]  = 0x00;
    Caching[index+7]  = 0x00;
    Caching[index+8]  = 0x00;
    Caching[index+9]  = 0x00;
    Caching[index+10] = 0x00;
    Caching[index+11] = 0x00;
    if (pSatDevData->satLookAheadEnabled == agTRUE)
    {
      Caching[index+12] = 0x00;/* DRA bit is NOT set */
    }
    else
    {
      Caching[index+12] = 0x20;/* DRA bit is set */
    }
    Caching[index+13] = 0x00;
    Caching[index+14] = 0x00;
    Caching[index+15] = 0x00;
    Caching[index+16] = 0x00;
    Caching[index+17] = 0x00;
    Caching[index+18] = 0x00;
    Caching[index+19] = 0x00;

    osti_memcpy(pModeSense, &Caching, lenRead);
  }
  else if (page == MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE)
  {
    TI_DBG5(("satModeSense10: MODESENSE_INFORMATION_EXCEPTION_CONTROL_PAGE\n"));
    /* MODE SENSE(10) header, 8 bytes */
    InfoExceptionCtrl[0] = 0;
    InfoExceptionCtrl[1] = (bit8)(lenRead - 2);
    InfoExceptionCtrl[2] = 0x00; /* medium type: default medium type (currently mounted medium type) */
    InfoExceptionCtrl[3] = 0x00; /* device-specific param: no write-protect, no support for DPO-FUA */
    if (LLBAA)
    {
      InfoExceptionCtrl[4] = 0x00; /* reserved and LONGLBA */
      InfoExceptionCtrl[4] = (bit8)(InfoExceptionCtrl[4] | 0x1); /* LONGLBA is set */
    }
    else
    {
      InfoExceptionCtrl[4] = 0x00; /* reserved and LONGLBA: LONGLBA is not set */
    }
    InfoExceptionCtrl[5] = 0x00; /* reserved */
    InfoExceptionCtrl[6] = 0x00; /* block descriptor length */
    if (LLBAA)
    {
      InfoExceptionCtrl[7] = 0x10; /* block descriptor length: LONGLBA is set. So, length is 16 */
    }
    else
    {
      InfoExceptionCtrl[7] = 0x08; /* block descriptor length: LONGLBA is NOT set. So, length is 8 */
    }

    /*
     * Fill-up direct-access device block-descriptor, SAT, Table 19
     */
    if (LLBAA)
    {
      /* density code */
      InfoExceptionCtrl[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      InfoExceptionCtrl[9]  = 0x00; /* unspecified */
      InfoExceptionCtrl[10] = 0x00; /* unspecified */
      InfoExceptionCtrl[11] = 0x00; /* unspecified */
      InfoExceptionCtrl[12] = 0x00; /* unspecified */
      InfoExceptionCtrl[13] = 0x00; /* unspecified */
      InfoExceptionCtrl[14] = 0x00; /* unspecified */
      InfoExceptionCtrl[15] = 0x00; /* unspecified */
      /* reserved */
      InfoExceptionCtrl[16] = 0x00; /* reserved */
      InfoExceptionCtrl[17] = 0x00; /* reserved */
      InfoExceptionCtrl[18] = 0x00; /* reserved */
      InfoExceptionCtrl[19] = 0x00; /* reserved */
      /* Block size */
      InfoExceptionCtrl[20] = 0x00;
      InfoExceptionCtrl[21] = 0x00;
      InfoExceptionCtrl[22] = 0x02; /* Block size is always 512 bytes */
      InfoExceptionCtrl[23] = 0x00;
    }
    else
    {
      /* density code */
      InfoExceptionCtrl[8]  = 0x04; /* density-code : reserved for direct-access */
      /* number of blocks */
      InfoExceptionCtrl[9]  = 0x00; /* unspecified */
      InfoExceptionCtrl[10] = 0x00; /* unspecified */
      InfoExceptionCtrl[11] = 0x00; /* unspecified */
      /* reserved */
      InfoExceptionCtrl[12] = 0x00; /* reserved */
      /* Block size */
      InfoExceptionCtrl[13] = 0x00;
      InfoExceptionCtrl[14] = 0x02; /* Block size is always 512 bytes */
      InfoExceptionCtrl[15] = 0x00;
    }

    if (LLBAA)
    {
      index = 24;
    }
    else
    {
      index = 16;
    }

    /*
     * Fill-up informational-exceptions control mode page, SAT, Table 68
     */
    InfoExceptionCtrl[index+0] = 0x1C; /* page code */
    InfoExceptionCtrl[index+1] = 0x0A; /* page length */
    if (pSatDevData->satSMARTEnabled == agTRUE)
    {
      InfoExceptionCtrl[index+2] = 0x00;/* DEXCPT bit is NOT set */
    }
    else
    {
      InfoExceptionCtrl[index+2] = 0x08;/* DEXCPT bit is set */
    }
    InfoExceptionCtrl[index+3]  = 0x00; /* We don't support MRIE */
    InfoExceptionCtrl[index+4]  = 0x00; /* Interval timer vendor-specific */
    InfoExceptionCtrl[index+5]  = 0x00;
    InfoExceptionCtrl[index+6]  = 0x00;
    InfoExceptionCtrl[index+7]  = 0x00;
    InfoExceptionCtrl[index+8]  = 0x00; /* REPORT-COUNT */
    InfoExceptionCtrl[index+9]  = 0x00;
    InfoExceptionCtrl[index+10] = 0x00;
    InfoExceptionCtrl[index+11] = 0x00;

    osti_memcpy(pModeSense, &InfoExceptionCtrl, lenRead);
  }
  else
  {
    /* Error: should be unreachable - page was validated above */
    TI_DBG1(("satModeSense10: Error page %d\n", page));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /* report underrun when fewer bytes are returned than requested */
  if (requestLen > lenRead)
  {
    TI_DBG1(("satModeSense10 reporting underrun lenRead=0x%x requestLen=0x%x tiIORequest=%p\n",
      lenRead, requestLen, tiIORequest));

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOUnderRun,
                              requestLen - lenRead,
                              agNULL,
                              satIOContext->interruptContext );
  }
  else
  {
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
  }

  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI VERIFY (10).
 *
 *  SAT implementation for SCSI VERIFY (10).
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satVerify10(
                          tiRoot_t                  *tiRoot,
                          tiIORequest_t             *tiIORequest,
                          tiDeviceHandle_t          *tiDeviceHandle,
                          tiScsiInitiatorRequest_t  *tiScsiRequest,
                          satIOContext_t            *satIOContext)
{
  /* For simple implementation, no byte comparison supported as of 4/5/06 */
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  satDeviceData_t           *pSatDevData;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;
  bit8                      LBA[4];
  bit8                      TL[4];
  bit32                     rangeChk = agFALSE; /* lba and tl range check */

  TI_DBG5(("satVerify10 entry: tiDeviceHandle=%p tiIORequest=%p\n",
    tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pSatDevData = satIOContext->pSatDevData;
  fis         = satIOContext->pFis;

  /* checking BYTCHK: byte-by-byte comparison is not implemented */
  if (scsiCmnd->cdb[1] & SCSI_VERIFY_BYTCHK_MASK)
  {
    /* should do the byte check but not supported in this version */
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satVerify10: no byte checking \n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1 : not supported */
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satVerify10: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  LBA[0] = scsiCmnd->cdb[2];  /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];  /* LSB */
  TL[0]  = 0;
  TL[1]  = 0;
  TL[2]  = scsiCmnd->cdb[7];  /* MSB */
  TL[3]  = scsiCmnd->cdb[8];  /* LSB */

  /* checks whether LBA + TL overflows 32 bits (end of range check) */
  rangeChk = satAddNComparebit32(LBA, TL);

  /* cbd10; computing LBA and transfer length */
  lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2))
      + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];
  tl  = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  /* without NCQ and 48-bit support only 28-bit LBAs are addressable */
  if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE)
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satVerify10: return LBA out of range, not EXT\n"));
      TI_DBG1(("satVerify10: cdb 0x%x 0x%x 0x%x 0x%x\n", scsiCmnd->cdb[2], scsiCmnd->cdb[3],
        scsiCmnd->cdb[4], scsiCmnd->cdb[5]));
      TI_DBG1(("satVerify10: lba 0x%x SAT_TR_LBA_LIMIT 0x%x\n", lba, SAT_TR_LBA_LIMIT));
      return tiSuccess;
    }

    if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satVerify10: return LBA+TL out of range, not EXT\n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
  }

  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    TI_DBG5(("satVerify10: SAT_READ_VERIFY_SECTORS_EXT\n"));
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
    fis->d.device         = 0x40;                   /* FIS LBA mode set 01000000 */
    fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                      /* FIS reserve */
    fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
    fis->d.sectorCountExp = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS_EXT;
  }
  else
  {
    TI_DBG5(("satVerify10: SAT_READ_VERIFY_SECTORS\n"));
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS;/* 0x40 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
    /* FIS LBA mode set LBA (27:24) */
    fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS;
  }

  /* saved for chained continuation in satChainedVerify() */
  satIOContext->currentLBA = lba;
  satIOContext->OrgTL      = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_READ_VERIFY_SECTORS)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT)
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    TI_DBG1(("satVerify10: error case 1!!!\n"));
    LoopNum = 1;
  }

  satIOContext->LoopNum = LoopNum;

  if (LoopNum == 1)
  {
    TI_DBG5(("satVerify10: NON CHAINED data\n"));
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satNonChainedVerifyCB;
  }
  else
  {
    TI_DBG1(("satVerify10: CHAINED data\n"));
    /* re-setting tl: first of several chained commands verifies a full chunk */
    if (fis->h.command == SAT_READ_VERIFY_SECTORS)
    {
      fis->d.sectorCount = 0xFF;
    }
    else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT)
    {
      fis->d.sectorCount    = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      TI_DBG1(("satVerify10: error case 2!!!\n"));
    }
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satChainedVerifyCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  return (status);
}

/*****************************************************************************/
/*! \brief Continuation of a chained SCSI VERIFY translation.
 *
 *  Issues the next READ VERIFY SECTORS(EXT) chunk of a VERIFY command whose
 *  transfer length exceeds what a single ATA command can carry (0xFF sectors
 *  for the 28-bit command, 0xFFFF for the EXT form). Advances currentLBA by
 *  the chunk size and, on the last loop, programs the remainder as the
 *  sector count.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext:     Pointer to the SAT IO Context
 *
 *  \return tiSuccess/tiBusy/tiError as reported by sataLLIOStart(), or
 *          tiError on an unexpected ATA command in the original context.
 */
/*****************************************************************************/
GLOBAL bit32 satChainedVerify(
                              tiRoot_t                  *tiRoot,
                              tiIORequest_t             *tiIORequest,
                              tiDeviceHandle_t          *tiDeviceHandle,
                              tiScsiInitiatorRequest_t  *tiScsiRequest,
                              satIOContext_t            *satIOContext)
{
  bit32                     status;
  satIOContext_t            *satOrgIOContext = agNULL;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
  bit32                     lba = 0;
  bit32                     DenomTL = 0xFF;
  bit32                     Remainder = 0;
  bit8                      LBA[4]; /* 0 MSB, 3 LSB */

  TI_DBG2(("satChainedVerify: start\n"));

  fis             = satIOContext->pFis;
  satOrgIOContext = satIOContext->satOrgIOContext;
  osti_memset(LBA, 0, sizeof(LBA));

  switch (satOrgIOContext->ATACmd)
  {
  case SAT_READ_VERIFY_SECTORS:
    DenomTL = 0xFF;
    break;
  case SAT_READ_VERIFY_SECTORS_EXT:
    DenomTL = 0xFFFF;
    break;
  default:
    TI_DBG1(("satChainedVerify: error incorrect ata command 0x%x\n", satIOContext->ATACmd));
    return tiError;
    break;
  }

  Remainder = satOrgIOContext->OrgTL % DenomTL;
  satOrgIOContext->currentLBA = satOrgIOContext->currentLBA + DenomTL;
  lba = satOrgIOContext->currentLBA;

  /*
   * Split the 32-bit LBA into big-endian bytes. The masks previously used
   * the wrong width (0xF000/0xF00/0xF0/0xF), which zeroed or truncated
   * every byte and made each chained continuation verify the wrong LBA;
   * full 8-bit byte masks are required here.
   */
  LBA[0] = (bit8)((lba & 0xFF000000) >> (8 * 3)); /* MSB */
  LBA[1] = (bit8)((lba & 0x00FF0000) >> (8 * 2));
  LBA[2] = (bit8)((lba & 0x0000FF00) >> 8);
  LBA[3] = (bit8)(lba & 0x000000FF);              /* LSB */

  switch (satOrgIOContext->ATACmd)
  {
  case SAT_READ_VERIFY_SECTORS:
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS;/* 0x40 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = LBA[3];                 /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = LBA[2];                 /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = LBA[1];                 /* FIS LBA (23:16) */
    /* FIS LBA mode set LBA (27:24) */
    fis->d.device         = (bit8)((0x4 << 4) | (LBA[0] & 0xF));
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    if (satOrgIOContext->LoopNum == 1)
    {
      /* last loop */
      fis->d.sectorCount  = (bit8)Remainder;        /* FIS sector count (7:0) */
    }
    else
    {
      fis->d.sectorCount  = 0xFF;                   /* FIS sector count (7:0) */
    }
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    break;
  case SAT_READ_VERIFY_SECTORS_EXT:
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT; /* 0x42 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = LBA[3];                 /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = LBA[2];                 /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = LBA[1];                 /* FIS LBA (23:16) */
    fis->d.device         = 0x40;                   /* FIS LBA mode set */
    fis->d.lbaLowExp      = LBA[0];                 /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                      /* FIS reserve */
    if (satOrgIOContext->LoopNum == 1)
    {
      /* last loop */
      fis->d.sectorCount    = (bit8)(Remainder & 0xFF);         /* FIS sector count (7:0) */
      fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8);/* FIS sector count (15:8) */
    }
    else
    {
      fis->d.sectorCount    = 0xFF;                 /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0xFF;                 /* FIS sector count (15:8) */
    }
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    break;
  default:
    TI_DBG1(("satChainedVerify: error incorrect ata command 0x%x\n", satIOContext->ATACmd));
    return tiError;
    break;
  }

  /* Initialize CB for SATA completion. */
  /* chained data */
  satIOContext->satCompleteCB = &satChainedVerifyCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  TI_DBG5(("satChainedVerify: return\n"));
  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI VERIFY (12).
 *
 *  SAT implementation for SCSI VERIFY (12).
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satVerify12(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /* For simple implementation, no byte comparison supported as of 4/5/06 */
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  satDeviceData_t           *pSatDevData;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;
  bit8                      LBA[4];
  bit8                      TL[4];
  bit32                     rangeChk = agFALSE; /* lba and tl range check */

  TI_DBG5(("satVerify12 entry: tiDeviceHandle=%p tiIORequest=%p\n",
      tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pSatDevData = satIOContext->pSatDevData;
  fis         = satIOContext->pFis;

  /* checking BYTCHK */
  if (scsiCmnd->cdb[1] & SCSI_VERIFY_BYTCHK_MASK)
  {
    /* should do the byte check but not supported in this version */
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    TI_DBG1(("satVerify12: no byte checking \n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[11] & SCSI_NACA_MASK) || (scsiCmnd->cdb[11] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    TI_DBG1(("satVerify12: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  LBA[0] = scsiCmnd->cdb[2];  /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];  /* LSB */
  /*
   * VERIFY(12) carries the verification length in cdb[6..9].  The previous
   * code loaded cdb[7] twice and never read cdb[9], so the LBA+TL range
   * check below was computed on a wrong transfer length.
   */
  TL[0]  = scsiCmnd->cdb[6];  /* MSB */
  TL[1]  = scsiCmnd->cdb[7];
  TL[2]  = scsiCmnd->cdb[8];
  TL[3]  = scsiCmnd->cdb[9];  /* LSB */

  rangeChk = satAddNComparebit32(LBA, TL);

  lba = satComputeCDB12LBA(satIOContext);
  tl  = satComputeCDB12TL(satIOContext);

  /* 28-bit command set: validate that LBA and LBA+TL fit in 28 bits */
  if (pSatDevData->satNCQ != agTRUE &&
      pSatDevData->sat48BitSupport != agTRUE
      )
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      TI_DBG1(("satVerify12: return LBA out of range, not EXT\n"));
      TI_DBG1(("satVerify12: cdb 0x%x 0x%x 0x%x 0x%x\n",scsiCmnd->cdb[2], scsiCmnd->cdb[3],
             scsiCmnd->cdb[4], scsiCmnd->cdb[5]));
      TI_DBG1(("satVerify12: lba 0x%x SAT_TR_LBA_LIMIT 0x%x\n", lba, SAT_TR_LBA_LIMIT));
      return tiSuccess;
    }

    if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satVerify12: return LBA+TL out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
  }

  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    TI_DBG5(("satVerify12: SAT_READ_VERIFY_SECTORS_EXT\n"));
    fis->h.fisType        = 0x27;                 /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
    fis->h.features       = 0;                    /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[5];     /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];     /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];     /* FIS LBA (23:16) */
    fis->d.device         = 0x40;                 /* FIS LBA mode set 01000000 */
    fis->d.lbaLowExp      = scsiCmnd->cdb[2];     /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                    /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                    /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                    /* FIS reserve */
    fis->d.sectorCount    = scsiCmnd->cdb[9];     /* FIS sector count (7:0) */
    fis->d.sectorCountExp = scsiCmnd->cdb[8];     /* FIS sector count (15:8) */
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                    /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS_EXT;
  }
  else
  {
    TI_DBG5(("satVerify12: SAT_READ_VERIFY_SECTORS\n"));
    fis->h.fisType        = 0x27;                 /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                 /* C bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS;  /* 0x40 */
    fis->h.features       = 0;                    /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[5];     /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];     /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];     /* FIS LBA (23:16) */
    /* FIS LBA mode set LBA (27:24) */
    fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF));
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = scsiCmnd->cdb[9];     /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                    /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS;
  }

  satIOContext->currentLBA = lba;
  satIOContext->OrgTL = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_READ_VERIFY_SECTORS)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT)
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    TI_DBG1(("satVerify12: error case 1!!!\n"));
    LoopNum = 1;
  }

  satIOContext->LoopNum = LoopNum;

  if (LoopNum == 1)
  {
    TI_DBG5(("satVerify12: NON CHAINED data\n"));
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satNonChainedVerifyCB;
  }
  else
  {
    TI_DBG1(("satVerify12: CHAINED data\n"));
    /* re-setting tl */
    if (fis->h.command == SAT_READ_VERIFY_SECTORS)
    {
      fis->d.sectorCount = 0xFF;
    }
    else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT)
    {
      fis->d.sectorCount = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      /* was mislabelled "satVerify10" (copy-paste) */
      TI_DBG1(("satVerify12: error case 2!!!\n"));
    }
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satChainedVerifyCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */
  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI VERIFY (16).
 *
 *  SAT implementation for SCSI VERIFY (16).
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satVerify16(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /* For simple implementation, no byte comparison supported as of 4/5/06 */
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  satDeviceData_t           *pSatDevData;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;
  bit8                      LBA[8];
  bit8                      TL[8];
  bit32                     rangeChk = agFALSE; /* lba and tl range check */
  bit32                     limitChk = agFALSE; /* lba and tl range check */

  TI_DBG5(("satVerify16 entry: tiDeviceHandle=%p tiIORequest=%p\n",
      tiDeviceHandle, tiIORequest));

  pSense      = satIOContext->pSense;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  pSatDevData = satIOContext->pSatDevData;
  fis         = satIOContext->pFis;

  /* checking BYTCHK */
  if (scsiCmnd->cdb[1] & SCSI_VERIFY_BYTCHK_MASK)
  {
    /* should do the byte check but not supported in this version */
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    TI_DBG1(("satVerify16: no byte checking \n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[15] & SCSI_NACA_MASK) || (scsiCmnd->cdb[15] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    TI_DBG2(("satVerify16: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  /* VERIFY(16): LBA in cdb[2..9], verification length in cdb[10..13] */
  LBA[0] = scsiCmnd->cdb[2];   /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];
  LBA[4] = scsiCmnd->cdb[6];
  LBA[5] = scsiCmnd->cdb[7];
  LBA[6] = scsiCmnd->cdb[8];
  LBA[7] = scsiCmnd->cdb[9];   /* LSB */

  TL[0] = 0;
  TL[1] = 0;
  TL[2] = 0;
  TL[3] = 0;
  TL[4] = scsiCmnd->cdb[10];   /* MSB */
  TL[5] = scsiCmnd->cdb[11];
  TL[6] = scsiCmnd->cdb[12];
  TL[7] = scsiCmnd->cdb[13];   /* LSB */

  rangeChk = satAddNComparebit64(LBA, TL);
  limitChk = satCompareLBALimitbit(LBA);

  lba = satComputeCDB16LBA(satIOContext);
  tl  = satComputeCDB16TL(satIOContext);

  /* 28-bit command set: validate that LBA and LBA+TL fit in 28 bits */
  if (pSatDevData->satNCQ != agTRUE &&
      pSatDevData->sat48BitSupport != agTRUE
      )
  {
    if (limitChk)
    {
      TI_DBG1(("satVerify16: return LBA out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satVerify16: return LBA+TL out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
  }

  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    TI_DBG5(("satVerify16: SAT_READ_VERIFY_SECTORS_EXT\n"));
    fis->h.fisType        = 0x27;                 /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
    fis->h.features       = 0;                    /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[9];     /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[8];     /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[7];     /* FIS LBA (23:16) */
    fis->d.device         = 0x40;                 /* FIS LBA mode set 01000000 */
    fis->d.lbaLowExp      = scsiCmnd->cdb[6];     /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = scsiCmnd->cdb[5];     /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = scsiCmnd->cdb[4];     /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                    /* FIS reserve */
    fis->d.sectorCount    = scsiCmnd->cdb[13];    /* FIS sector count (7:0) */
    fis->d.sectorCountExp = scsiCmnd->cdb[12];    /* FIS sector count (15:8) */
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                    /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS_EXT;
  }
  else
  {
    /* debug strings below previously said "satVerify12" (copy-paste) */
    TI_DBG5(("satVerify16: SAT_READ_VERIFY_SECTORS\n"));
    fis->h.fisType        = 0x27;                 /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                 /* C bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS;  /* 0x40 */
    fis->h.features       = 0;                    /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[9];     /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[8];     /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[7];     /* FIS LBA (23:16) */
    /* FIS LBA mode set LBA (27:24) */
    fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF));
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = scsiCmnd->cdb[13];    /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                    /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS;
  }

  satIOContext->currentLBA = lba;
  satIOContext->OrgTL = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_READ_VERIFY_SECTORS)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT)
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    TI_DBG1(("satVerify16: error case 1!!!\n"));
    LoopNum = 1;
  }

  satIOContext->LoopNum = LoopNum;

  if (LoopNum == 1)
  {
    TI_DBG5(("satVerify16: NON CHAINED data\n"));
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satNonChainedVerifyCB;
  }
  else
  {
    TI_DBG1(("satVerify16: CHAINED data\n"));
    /* re-setting tl */
    if (fis->h.command == SAT_READ_VERIFY_SECTORS)
    {
      fis->d.sectorCount = 0xFF;
    }
    else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT)
    {
      fis->d.sectorCount = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      TI_DBG1(("satVerify16: error case 2!!!\n"));
    }
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satChainedVerifyCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */
  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satFormatUnit.
 *
 *  SAT implementation for SCSI satFormatUnit.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satFormatUnit(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
    note: we don't support media certification in this version and IP bit
    satDevData->satFormatState will be agFalse since SAT does not actually sends
    any ATA command

    FORMAT UNIT never reaches the drive: every path below completes the SCSI
    command locally with either GOOD status or CHECK CONDITION.
   */
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  bit32                     index = 0;   /* offset of the defect list length field */

  pSense   = satIOContext->pSense;
  scsiCmnd = &tiScsiRequest->scsiCmnd;
  TI_DBG5(("satFormatUnit:start\n"));

  /*
    checking opcode
    1. FMTDATA bit == 0(no defect list header)
    2. FMTDATA bit == 1 and DCRT bit == 1(defect list header is provided with DCRT bit set)

    NOTE(review): FORMAT UNIT is a 6-byte CDB; cdb[7] and cdb[index] (8/10)
    below appear to address defect-list header bytes appended after the CDB
    in this driver's cdb[] buffer — TODO confirm against the request layout.
  */
  if ( ((scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_FMTDATA_MASK) == 0) ||
       ((scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_FMTDATA_MASK) && (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_DCRT_MASK))
     )
  {
    /* nothing to validate; report success immediately */
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);

    TI_DBG2(("satFormatUnit: return opcode\n"));
    return tiSuccess;
  }

  /* checking DEFECT LIST FORMAT and defect list length */
  if ( (((scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_DEFECT_LIST_FORMAT_MASK) == 0x00) ||
        ((scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_DEFECT_LIST_FORMAT_MASK) == 0x06)) )
  {
    /* short parameter header */
    if ((scsiCmnd->cdb[2] & SCSI_FORMAT_UNIT_LONGLIST_MASK) == 0x00)
    {
      index = 8;
    }
    /* long parameter header */
    /* NOTE(review): this compares the masked value to 0x01; only true if
       SCSI_FORMAT_UNIT_LONGLIST_MASK is 0x01 — confirm the macro value */
    if ((scsiCmnd->cdb[2] & SCSI_FORMAT_UNIT_LONGLIST_MASK) == 0x01)
    {
      index = 10;
    }
    /* defect list length: any non-zero length is rejected */
    if ((scsiCmnd->cdb[index] != 0) || (scsiCmnd->cdb[index+1] != 0))
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satFormatUnit: return defect list format\n"));
      return tiSuccess;
    }
  }

  /* FMTDATA == 1 && CMPLIST == 1: combination not supported */
  if ( (scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_FMTDATA_MASK) &&
       (scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_CMPLIST_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satFormatUnit: return cmplist\n"));
    return tiSuccess;
  }

  /* checking CONTROL: NACA == 1 or LINK == 1 is rejected */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satFormatUnit: return control\n"));
    return tiSuccess;
  }

  /* defect list header filed, if exists, SAT rev8, Table 37, p48 */
  if (scsiCmnd->cdb[1] & SCSI_FORMAT_UNIT_FMTDATA_MASK)
  {
    /* case 1,2,3 */
    /* IMMED 1; FOV 0; FOV 1, DCRT 1, IP 0 — accepted combinations */
    if ( (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IMMED_MASK) ||
         ( !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_FOV_MASK)) ||
         ( (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_FOV_MASK) &&
           (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_DCRT_MASK) &&
          !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IP_MASK))
       )
    {
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);

      TI_DBG5(("satFormatUnit: return defect list case 1\n"));
      return tiSuccess;
    }
    /* case 4,5,6 */
    /*
      1. IMMED 0, FOV 1, DCRT 0, IP 0
      2. IMMED 0, FOV 1, DCRT 0, IP 1
      3. IMMED 0, FOV 1, DCRT 1, IP 1
      — rejected combinations (media certification / IP not supported)
    */
    if ( ( !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IMMED_MASK) &&
            (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_FOV_MASK) &&
           !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_DCRT_MASK) &&
           !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IP_MASK) )
         ||
         ( !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IMMED_MASK) &&
            (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_FOV_MASK) &&
           !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_DCRT_MASK) &&
            (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IP_MASK) )
         ||
         ( !(scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IMMED_MASK) &&
            (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_FOV_MASK) &&
            (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_DCRT_MASK) &&
            (scsiCmnd->cdb[7] & SCSI_FORMAT_UNIT_IP_MASK) )
       )
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG5(("satFormatUnit: return defect list case 2\n"));
      return tiSuccess;
    }
  }

  /*
   * Send the completion response now.
   */
  ostiInitiatorIOCompleted( tiRoot,
                            tiIORequest,
                            tiIOSuccess,
                            SCSI_STAT_GOOD,
                            agNULL,
                            satIOContext->interruptContext);

  TI_DBG5(("satFormatUnit: return last\n"));
  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satSendDiagnostic.
 *
 *  SAT implementation for SCSI satSendDiagnostic.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32  satSendDiagnostic(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     parmLen;    /* PARAMETER LIST LENGTH from cdb[3..4] */

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satSendDiagnostic:start\n"));

  /* reset satVerifyState */
  pSatDevData->satVerifyState = 0;
  /* no pending diagnostic in background */
  pSatDevData->satBGPendingDiag = agFALSE;

  /* table 27, 8.10 p39 SAT Rev8 */
  /*
    1. checking PF == 1
    2. checking DEVOFFL == 1
    3. checking UNITOFFL == 1
    4. checking PARAMETER LIST LENGTH != 0
    — none of these are supported; reject with ILLEGAL REQUEST
  */
  if ( (scsiCmnd->cdb[1] & SCSI_PF_MASK) ||
       (scsiCmnd->cdb[1] & SCSI_DEVOFFL_MASK) ||
       (scsiCmnd->cdb[1] & SCSI_UNITOFFL_MASK) ||
       ( (scsiCmnd->cdb[3] != 0) || (scsiCmnd->cdb[4] != 0) )
     )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satSendDiagnostic: return PF, DEVOFFL, UNITOFFL, PARAM LIST\n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satSendDiagnostic: return control\n"));
    return tiSuccess;
  }

  /* NOTE(review): the check above already rejected non-zero cdb[3]/cdb[4],
     so parmLen can only be 0 here; kept for the case-4 validation below */
  parmLen = (scsiCmnd->cdb[3] << 8) + scsiCmnd->cdb[4];

  /* checking SELFTEST bit*/
  /* table 29, 8.10.3, p41 SAT Rev8 */
  /* case 1: SELFTEST == 0 and the drive reports no SMART self-test support */
  if ( !(scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_SELFTEST_MASK) &&
       (pSatDevData->satSMARTSelfTest == agFALSE)
     )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satSendDiagnostic: return Table 29 case 1\n"));
    return tiSuccess;
  }

  /* case 2: self-test supported but the SMART feature set is disabled */
  if ( !(scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_SELFTEST_MASK) &&
       (pSatDevData->satSMARTSelfTest == agTRUE) &&
       (pSatDevData->satSMARTEnabled == agFALSE)
     )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ABORTED_COMMAND,
                        0,
                        SCSI_SNSCODE_ATA_DEVICE_FEATURE_NOT_ENABLED,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG5(("satSendDiagnostic: return Table 29 case 2\n"));
    return tiSuccess;
  }
  /*
    case 3
    see SELF TEST CODE later
  */

  /* case 4: SELFTEST == 1 but SMART self-test unusable; emulate with an
     ATA READ VERIFY SECTOR(S) (sector count 1, LBA 0 — the CB issues the
     remaining LBA MAX / LBA random verifies) */
  /* sends three ATA verify commands */
  if ( ((scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_SELFTEST_MASK) &&
        (pSatDevData->satSMARTSelfTest == agFALSE))
       ||
       ((scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_SELFTEST_MASK) &&
        (pSatDevData->satSMARTSelfTest == agTRUE) &&
        (pSatDevData->satSMARTEnabled == agFALSE))
     )
  {
    /*
      sector count 1, LBA 0
      sector count 1, LBA MAX
      sector count 1, LBA random
    */
    if (pSatDevData->sat48BitSupport == agTRUE)
    {
      /* sends READ VERIFY SECTOR(S) EXT*/
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;                      /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = 1;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;                      /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.device         = 0x40;                   /* 01000000 */
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    }
    else
    {
      /* READ VERIFY SECTOR(S)*/
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_READ_VERIFY_SECTORS;/* 0x40 */
      fis->h.features       = 0;                      /* FIS features NA */
      fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 1;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.device         = 0x40;                   /* 01000000 */
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    }

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satSendDiagnosticCB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */
    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);
    TI_DBG5(("satSendDiagnostic: return Table 29 case 4\n"));
    return (status);
  }

  /* case 5: default self-test maps to SMART short self-test (captive),
     LBA(7:0) = 0x81, SMART key 0x4F/0xC2 in LBA mid/high */
  if ( (scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_SELFTEST_MASK) &&
       (pSatDevData->satSMARTSelfTest == agTRUE) &&
       (pSatDevData->satSMARTEnabled == agTRUE)
     )
  {
    /* sends SMART EXECUTE OFF-LINE IMMEDIATE */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE;/* 0xB0 */
    fis->h.features       = 0xD4;                   /* subcommand: EXECUTE OFF-LINE IMMEDIATE */
    fis->d.lbaLow         = 0x81;                   /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16) */
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.device         = 0;                      /* FIS DEV is discared in SATA */
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satSendDiagnosticCB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */
    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);
    TI_DBG5(("satSendDiagnostic: return Table 29 case 5\n"));
    return (status);
  }

  /* SAT rev8 Table29 p41 case 3*/
  /* checking SELF TEST CODE*/
  if ( !(scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_SELFTEST_MASK) &&
       (pSatDevData->satSMARTSelfTest == agTRUE) &&
       (pSatDevData->satSMARTEnabled == agTRUE)
     )
  {
    /* SAT rev8 Table28 p40 */
    /* finding self-test code: SELF-TEST CODE field is cdb[1] bits 7..5 */
    switch ((scsiCmnd->cdb[1] & SCSI_SEND_DIAGNOSTIC_TEST_CODE_MASK) >> 5)
    {
    case 1:
      /* background short self-test: complete the SCSI command first, then
         fire the ATA command and remember it is pending */
      pSatDevData->satBGPendingDiag = agTRUE;

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext );
      /* sends SMART EXECUTE OFF-LINE IMMEDIATE */
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE;/* 0xB0 */
      fis->h.features       = 0xD4;                   /* subcommand: EXECUTE OFF-LINE IMMEDIATE */
      fis->d.lbaLow         = 0x01;                   /* short self-test, background */
      fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.device         = 0;                      /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satSendDiagnosticCB;

      /*
       * Prepare SGL and send FIS to LL layer.
       */
      satIOContext->reqType = agRequestType;       /* Save it */
      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      TI_DBG5(("satSendDiagnostic: return Table 28 case 1\n"));
      return (status);
    case 2:
      /* background extended self-test */
      pSatDevData->satBGPendingDiag = agTRUE;

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext );
      /* issuing SMART EXECUTE OFF-LINE IMMEDIATE */
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE;/* 0xB0 */
      fis->h.features       = 0xD4;                   /* subcommand: EXECUTE OFF-LINE IMMEDIATE */
      fis->d.lbaLow         = 0x02;                   /* extended self-test, background */
      fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.device         = 0;                      /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satSendDiagnosticCB;

      /*
       * Prepare SGL and send FIS to LL layer.
       */
      satIOContext->reqType = agRequestType;       /* Save it */
      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      TI_DBG5(("satSendDiagnostic: return Table 28 case 2\n"));
      return (status);
    case 4:
      /*
        For simplicity, no abort is supported
        Returns good status
        need a flag in device data for previously sent background Send Diagnostic
      */
      if (parmLen != 0)
      {
        /* check condition */
        satSetSensePayload( pSense,
                            SCSI_SNSKEY_ILLEGAL_REQUEST,
                            0,
                            SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                            satIOContext);

        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOSuccess,
                                  SCSI_STAT_CHECK_CONDITION,
                                  satIOContext->pTiSenseData,
                                  satIOContext->interruptContext );

        TI_DBG1(("satSendDiagnostic: case 4, non zero ParmLen %d\n", parmLen));
        return tiSuccess;
      }
      if (pSatDevData->satBGPendingDiag == agTRUE)
      {
        /* sends SMART EXECUTE OFF-LINE IMMEDIATE abort */
        fis->h.fisType        = 0x27;                   /* Reg host to device */
        fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
        fis->h.command        = SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE;/* 0xB0 */
        fis->h.features       = 0xD4;                   /* subcommand: EXECUTE OFF-LINE IMMEDIATE */
        fis->d.lbaLow         = 0x7F;                   /* abort off-line mode self-test */
        fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ) */
        fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16) */
        fis->d.lbaLowExp      = 0;
        fis->d.lbaMidExp      = 0;
        fis->d.lbaHighExp     = 0;
        fis->d.featuresExp    = 0;
        fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0;
        fis->d.reserved4      = 0;
        fis->d.device         = 0;                      /* FIS DEV is discared in SATA */
        fis->d.control        = 0;                      /* FIS HOB bit clear */
        fis->d.reserved5      = 0;

        agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

        /* Initialize CB for SATA completion. */
        satIOContext->satCompleteCB = &satSendDiagnosticCB;

        /*
         * Prepare SGL and send FIS to LL layer.
         */
        satIOContext->reqType = agRequestType;       /* Save it */
        status = sataLLIOStart( tiRoot,
                                tiIORequest,
                                tiDeviceHandle,
                                tiScsiRequest,
                                satIOContext);
        TI_DBG5(("satSendDiagnostic: send SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE case 3\n"));
        TI_DBG5(("satSendDiagnostic: Table 28 case 4\n"));
        return (status);
      }
      else
      {
        /* check condition */
        satSetSensePayload( pSense,
                            SCSI_SNSKEY_ILLEGAL_REQUEST,
                            0,
                            SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                            satIOContext);

        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOSuccess,
                                  SCSI_STAT_CHECK_CONDITION,
                                  satIOContext->pTiSenseData,
                                  satIOContext->interruptContext );

        TI_DBG1(("satSendDiagnostic: case 4, no pending diagnostic in background\n"));
        TI_DBG5(("satSendDiagnostic: Table 28 case 4\n"));
        return tiSuccess;
      }
      break;
    case 5:
      /* foreground short self-test (captive mode, LBA low 0x81) */
      /* issuing SMART EXECUTE OFF-LINE IMMEDIATE */
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE;/* 0xB0 */
      fis->h.features       = 0xD4;                   /* subcommand: EXECUTE OFF-LINE IMMEDIATE */
      fis->d.lbaLow         = 0x81;                   /* short self-test, captive */
      fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.device         = 0;                      /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satSendDiagnosticCB;

      /*
       * Prepare SGL and send FIS to LL layer.
       */
      satIOContext->reqType = agRequestType;       /* Save it */
      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      TI_DBG5(("satSendDiagnostic: return Table 28 case 5\n"));
      return (status);
    case 6:
      /* foreground extended self-test (captive mode, LBA low 0x82) */
      /* issuing SMART EXECUTE OFF-LINE IMMEDIATE */
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_SMART_EXEUTE_OFF_LINE_IMMEDIATE;/* 0xB0 */
      fis->h.features       = 0xD4;                   /* subcommand: EXECUTE OFF-LINE IMMEDIATE */
      fis->d.lbaLow         = 0x82;                   /* extended self-test, captive */
      fis->d.lbaMid         = 0x4F;                   /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0xC2;                   /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.device         = 0;                      /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satSendDiagnosticCB;

      /*
       * Prepare SGL and send FIS to LL layer.
       */
      satIOContext->reqType = agRequestType;       /* Save it */
      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      TI_DBG5(("satSendDiagnostic: return Table 28 case 6\n"));
      return (status);
    case 0:
    case 3: /* fall through */
    case 7: /* fall through */
    default:
      break;
    }/* switch */

    /* returns the results of default self-testing, which is good */
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext );

    TI_DBG5(("satSendDiagnostic: return Table 28 case 0,3,7 and default\n"));
    return tiSuccess;
  }

  /* fall-through: nothing to do, complete with GOOD status */
  ostiInitiatorIOCompleted( tiRoot,
                            tiIORequest,
                            tiIOSuccess,
                            SCSI_STAT_GOOD,
                            agNULL,
                            satIOContext->interruptContext );

  TI_DBG5(("satSendDiagnostic: return last\n"));
  return tiSuccess;
}

/*****************************************************************************/
/*!
\brief SAT implementation for SCSI satSendDiagnostic_1. * * SAT implementation for SCSI satSendDiagnostic_1. * Sub function of satSendDiagnostic. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satSendDiagnostic_1( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* SAT Rev9, Table29, p41 send 2nd SAT_READ_VERIFY_SECTORS(_EXT) */ bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; agsaFisRegHostToDevice_t *fis; TI_DBG5(("satSendDiagnostic_1 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); pSatDevData = satIOContext->pSatDevData; fis = satIOContext->pFis; /* sector count 1, LBA MAX */ if (pSatDevData->sat48BitSupport == agTRUE) { /* sends READ VERIFY SECTOR(S) EXT*/ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = pSatDevData->satMaxLBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = pSatDevData->satMaxLBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = pSatDevData->satMaxLBA[5]; /* FIS LBA (23:16) */ fis->d.lbaLowExp = pSatDevData->satMaxLBA[4]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = pSatDevData->satMaxLBA[3]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = pSatDevData->satMaxLBA[2]; /* 
FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.device = 0x40; /* 01000000 */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; } else { /* READ VERIFY SECTOR(S)*/ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS;/* 0x40 */ fis->h.features = 0; /* FIS features NA */ fis->d.lbaLow = pSatDevData->satMaxLBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = pSatDevData->satMaxLBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = pSatDevData->satMaxLBA[5]; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.device = (bit8)((0x4 << 4) | (pSatDevData->satMaxLBA[4] & 0xF)); /* DEV and LBA 27:24 */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; } agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satSendDiagnosticCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satSendDiagnostic_2. * * SAT implementation for SCSI satSendDiagnostic_2. * Sub function of satSendDiagnostic. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. 
* \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satSendDiagnostic_2( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* SAT Rev9, Table29, p41 send 3rd SAT_READ_VERIFY_SECTORS(_EXT) */ bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; agsaFisRegHostToDevice_t *fis; TI_DBG5(("satSendDiagnostic_2 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); pSatDevData = satIOContext->pSatDevData; fis = satIOContext->pFis; /* sector count 1, LBA Random */ if (pSatDevData->sat48BitSupport == agTRUE) { /* sends READ VERIFY SECTOR(S) EXT*/ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0x7F; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.device = 0x40; /* 01000000 */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; } else { /* READ VERIFY SECTOR(S)*/ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS;/* 0x40 */ fis->h.features = 0; /* FIS features NA */ fis->d.lbaLow = 0x7F; /* FIS LBA (7 :0 ) */ 
fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.device = 0x40; /* FIS LBA mode set 01000000 */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; } agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satSendDiagnosticCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satStartStopUnit. * * SAT implementation for SCSI satStartStopUnit. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/
/*****************************************************************************/
GLOBAL bit32 satStartStopUnit(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satStartStopUnit:start\n"));

  /* CONTROL byte check: NACA == 1 or LINK == 1 is not supported */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satStartStopUnit: return control\n"));
    return tiSuccess;
  }

  /* SAT spec p55, Table 48: dispatch on the START and LOEJ bits */
  /* case 1: START == 0 && LOEJ == 0 -> flush the write cache */
  if ( !(scsiCmnd->cdb[4] & SCSI_START_MASK) && !(scsiCmnd->cdb[4] & SCSI_LOEJ_MASK) )
  {
    if ( (scsiCmnd->cdb[1] & SCSI_IMMED_MASK) )
    {
      /* IMMED bit set: complete immediately, SAT rev 8, 9.11.2.1 p54 */
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext );
      TI_DBG5(("satStartStopUnit: return table48 case 1-1\n"));
      return tiSuccess;
    }

    /* sends FLUSH CACHE or FLUSH CACHE EXT */
    if (pSatDevData->sat48BitSupport == agTRUE)
    {
      /* FLUSH CACHE EXT */
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C bit is set */
      fis->h.command        = SAT_FLUSH_CACHE_EXT;  /* 0xEA */
      fis->h.features       = 0;                    /* FIS reserve */
      fis->d.featuresExp    = 0;                    /* FIS reserve */
      fis->d.sectorCount    = 0;                    /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;                    /* FIS sector count (15:8) */
      fis->d.lbaLow         = 0;                    /* FIS LBA (7 :0 ) */
      fis->d.lbaLowExp      = 0;                    /* FIS LBA (31:24) */
      fis->d.lbaMid         = 0;                    /* FIS LBA (15:8 ) */
      fis->d.lbaMidExp      = 0;                    /* FIS LBA (39:32) */
      fis->d.lbaHigh        = 0;                    /* FIS LBA (23:16) */
      fis->d.lbaHighExp     = 0;                    /* FIS LBA (47:40) */
      fis->d.device         = 0;                    /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved4      = 0;
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    }
    else
    {
      /* FLUSH CACHE */
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C bit is set */
      fis->h.command        = SAT_FLUSH_CACHE;      /* 0xE7 */
      fis->h.features       = 0;                    /* FIS features NA */
      fis->d.lbaLow         = 0;                    /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0;                    /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0;                    /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                    /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.device         = 0;                    /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved4      = 0;
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;
    }

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satStartStopUnitCB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */

    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);
    TI_DBG5(("satStartStopUnit: return table48 case 1\n"));
    return (status);
  }
  /* case 2: START == 1 && LOEJ == 0 -> verify media readiness */
  else if ( (scsiCmnd->cdb[4] & SCSI_START_MASK) && !(scsiCmnd->cdb[4] & SCSI_LOEJ_MASK) )
  {
    if ( (scsiCmnd->cdb[1] & SCSI_IMMED_MASK) )
    {
      /* IMMED bit set: complete immediately, SAT rev 8, 9.11.2.1 p54 */
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext );
      TI_DBG5(("satStartStopUnit: return table48 case 2 1\n"));
      return tiSuccess;
    }

    /*
     * sends READ_VERIFY_SECTORS(_EXT)
     * sector count 1, any LBA between zero and maximum
     */
    if (pSatDevData->sat48BitSupport == agTRUE)
    {
      /* READ VERIFY SECTOR(S) EXT */
      fis->h.fisType        = 0x27;                        /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                        /* C bit is set */
      fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT; /* 0x42 */
      fis->h.features       = 0;                           /* FIS reserve */
      fis->d.lbaLow         = 0x01;                        /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0x00;                        /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0x00;                        /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0x00;                        /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0x00;                        /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0x00;                        /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                           /* FIS reserve */
      fis->d.sectorCount    = 1;                           /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;                           /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.device         = 0x40;                        /* 01000000: LBA mode */
      fis->d.control        = 0;                           /* FIS HOB bit clear */
      fis->d.reserved5      = 0;
    }
    else
    {
      /* READ VERIFY SECTOR(S) */
      fis->h.fisType        = 0x27;                        /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                        /* C bit is set */
      fis->h.command        = SAT_READ_VERIFY_SECTORS;     /* 0x40 */
      fis->h.features       = 0;                           /* FIS features NA */
      fis->d.lbaLow         = 0x01;                        /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0x00;                        /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0x00;                        /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 1;                           /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.device         = 0x40;                        /* 01000000: LBA mode */
      fis->d.control        = 0;                           /* FIS HOB bit clear */
      fis->d.reserved5      = 0;
    }

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satStartStopUnitCB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */

    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);
    TI_DBG5(("satStartStopUnit: return table48 case 2 2\n"));
    return status;
  }
  /* case 3: START == 0 && LOEJ == 1 -> eject media if removable */
  else if ( !(scsiCmnd->cdb[4] & SCSI_START_MASK) && (scsiCmnd->cdb[4] & SCSI_LOEJ_MASK) )
  {
    if (pSatDevData->satRemovableMedia && pSatDevData->satRemovableMediaEnabled)
    {
      /* removable media is supported */
      if ( (scsiCmnd->cdb[1] & SCSI_IMMED_MASK) )
      {
        /* IMMED bit set: complete immediately, SAT rev 8, 9.11.2.1 p54 */
        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOSuccess,
                                  SCSI_STAT_GOOD,
                                  agNULL,
                                  satIOContext->interruptContext );
        TI_DBG5(("satStartStopUnit: return table48 case 3 1\n"));
        return tiSuccess;
      }

      /* sends MEDIA EJECT */
      fis->h.fisType        = 0x27;                 /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                 /* C bit is set */
      fis->h.command        = SAT_MEDIA_EJECT;      /* 0xED */
      fis->h.features       = 0;                    /* FIS features NA */
      fis->d.lbaLow         = 0;                    /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0;                    /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0;                    /* FIS LBA (23:16) */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                    /* sector count zero */
      fis->d.sectorCountExp = 0;
      fis->d.device         = 0;                    /* FIS DEV is discared in SATA */
      fis->d.control        = 0;                    /* FIS HOB bit clear */
      fis->d.reserved4      = 0;
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satStartStopUnitCB;

      /*
       * Prepare SGL and send FIS to LL layer.
       */
      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      return status;
    }
    else
    {
      /* no support for removable media */
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG5(("satStartStopUnit: return Table 29 case 3 2\n"));
      return tiSuccess;
    }
  }
  /* case 4: START == 1 && LOEJ == 1 -> invalid combination */
  else /* ( (scsiCmnd->cdb[4] & SCSI_START_MASK) && (scsiCmnd->cdb[4] & SCSI_LOEJ_MASK) ) */
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG5(("satStartStopUnit: return Table 29 case 4\n"));
    return tiSuccess;
  }
}
/*****************************************************************************/
/*! \brief SAT implementation for SCSI satStartStopUnit_1.
 *
 *  SAT implementation for SCSI satStartStopUnit_1.
 *  Sub function of satStartStopUnit
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satStartStopUnit_1(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /* SAT Rev 8, Table 48, 9.11.3 p55: sends STANDBY */
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t  *fis;

  TI_DBG5(("satStartStopUnit_1 entry: tiDeviceHandle=%p tiIORequest=%p\n",
           tiDeviceHandle, tiIORequest));

  fis = satIOContext->pFis;

  /* STANDBY */
  fis->h.fisType        = 0x27;           /* Reg host to device */
  fis->h.c_pmPort       = 0x80;           /* C bit is set */
  fis->h.command        = SAT_STANDBY;    /* 0xE2 */
  fis->h.features       = 0;              /* FIS features NA */
  fis->d.lbaLow         = 0;              /* FIS LBA (7 :0 ) */
  fis->d.lbaMid         = 0;              /* FIS LBA (15:8 ) */
  fis->d.lbaHigh        = 0;              /* FIS LBA (23:16) */
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;              /* FIS sector count (7:0) */
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.device         = 0;              /* 0 */
  fis->d.control        = 0;              /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satStartStopUnitCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  TI_DBG5(("satStartStopUnit_1 return status %d\n", status));
  return status;
}
/*****************************************************************************/
/*! \brief SAT implementation for SCSI satRead10_2.
 *
 *  SAT implementation for SCSI satRead10_2
 *  Sub function of satRead10
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
* \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satRead10_2( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* externally generated ATA cmd, there is corresponding scsi cmnd called by satStartStopUnit() or maybe satRead10() */ bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; agsaFisRegHostToDevice_t *fis; pSatDevData = satIOContext->pSatDevData; fis = satIOContext->pFis; TI_DBG5(("satReadVerifySectorsNoChain: start\n")); /* specifying ReadVerifySectors has no chain */ pSatDevData->satVerifyState = 0xFFFFFFFF; if (pSatDevData->sat48BitSupport == agTRUE) { /* READ VERIFY SECTOR(S) EXT*/ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0x7F; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0x4F; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0x00; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0xF1; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0x5F; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0xFF; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.device = 0x4E; /* 01001110 */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; } else { /* READ VERIFY SECTOR(S)*/ fis->h.fisType = 0x27; /* Reg host to device */ 
fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS;/* 0x40 */ fis->h.features = 0; /* FIS features NA */ fis->d.lbaLow = 0x7F; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0x4F; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0x00; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.device = 0x4E; /* 01001110 */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonDataIOCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satReadVerifySectorsNoChain: return last\n")); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satWriteSame10. * * SAT implementation for SCSI satWriteSame10. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/ /*****************************************************************************/ GLOBAL bit32 satWriteSame10( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satWriteSame10: start\n")); /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteSame10: return control\n")); return tiSuccess; } /* checking LBDATA and PBDATA */ /* case 1 */ if ( !(scsiCmnd->cdb[1] & SCSI_WRITE_SAME_LBDATA_MASK) && !(scsiCmnd->cdb[1] & SCSI_WRITE_SAME_PBDATA_MASK)) { TI_DBG5(("satWriteSame10: case 1\n")); /* spec 9.26.2, Table 62, p64, case 1*/ /* normal case just like write in 9.17.1 */ if ( pSatDevData->sat48BitSupport != agTRUE ) { /* writeSame10 but no support for 48 bit addressing -> problem in transfer length. 
Therefore, return check condition */ satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteSame10: return internal checking\n")); return tiSuccess; } /* cdb10; computing LBA and transfer length */ lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2)) + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5]; tl = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8]; /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b (footnote) When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (lba > SAT_TR_LBA_LIMIT - 1) /* SAT_TR_LBA_LIMIT is 2^28, 0x10000000 */ { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteSame10: return LBA out of range\n")); return tiSuccess; } } if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* WRITE DMA */ /* can't fit the transfer length since WRITE DMA has 1 byte for sector count */ TI_DBG5(("satWriteSame10: case 1-2 !!! 
error due to writeSame10\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } else { /* case 1 */ /* WRITE MULTIPLE or WRITE SECTOR(S) */ /* WRITE SECTORS is chosen for easier implemetation */ /* can't fit the transfer length since WRITE DMA has 1 byte for sector count */ TI_DBG5(("satWriteSame10: case 1-1 !!! error due to writesame10\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* end of case 1 and 2 */ /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* WRITE DMA EXT or WRITE DMA FUA EXT */ /* WRITE DMA EXT is chosen since WRITE SAME does not have FUA bit */ TI_DBG5(("satWriteSame10: case 1-3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x35 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (tl == 0) { /* error check ATA spec, p125, 6.17.29 pSatDevData->satMaxUserAddrSectors should be 0x0FFFFFFF and allowed value is 0x0FFFFFFF - 1 */ if (pSatDevData->satMaxUserAddrSectors > 0x0FFFFFFF) { 
TI_DBG5(("satWriteSame10: case 3 !!! warning can't fit sectors\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* one sector at a time */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; } else { /* case 4 */ /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */ /* WRITE SECTORS EXT is chosen for easier implemetation */ TI_DBG5(("satWriteSame10: case 1-4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (tl == 0) { /* error check ATA spec, p125, 6.17.29 pSatDevData->satMaxUserAddrSectors should be 0x0FFFFFFF and allowed value is 0x0FFFFFFF - 1 */ if (pSatDevData->satMaxUserAddrSectors > 0x0FFFFFFF) { TI_DBG5(("satWriteSame10: case 4 !!! 
warning can't fit sectors\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* one sector at a time */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* WRITE FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satWriteSame10: case 1-5 !!! error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG5(("satWriteSame10: case 1-5\n")); /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ if (tl == 0) { /* error check ATA spec, p125, 6.17.29 pSatDevData->satMaxUserAddrSectors should be 0x0FFFFFFF and allowed value is 0x0FFFFFFF - 1 */ if (pSatDevData->satMaxUserAddrSectors > 0x0FFFFFFF) { TI_DBG5(("satWriteSame10: case 4 !!! 
warning can't fit sectors\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* one sector at a time */ fis->h.features = 1; /* FIS sector count (7:0) */ fis->d.featuresExp = 0; /* FIS sector count (15:8) */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* NO FUA bit in the WRITE SAME 10 */ fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satWriteSame10CB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } /* end of case 1 */ else if ( !(scsiCmnd->cdb[1] & SCSI_WRITE_SAME_LBDATA_MASK) && (scsiCmnd->cdb[1] & SCSI_WRITE_SAME_PBDATA_MASK)) { /* spec 9.26.2, Table 62, p64, case 2*/ satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG5(("satWriteSame10: return Table 62 case 2\n")); return tiSuccess; } else if ( (scsiCmnd->cdb[1] & SCSI_WRITE_SAME_LBDATA_MASK) && !(scsiCmnd->cdb[1] & SCSI_WRITE_SAME_PBDATA_MASK)) { TI_DBG5(("satWriteSame10: Table 62 case 3\n")); } else /* ( (scsiCmnd->cdb[1] & SCSI_WRITE_SAME_LBDATA_MASK) && (scsiCmnd->cdb[1] & SCSI_WRITE_SAME_PBDATA_MASK)) */ { /* spec 9.26.2, Table 62, p64, case 4*/ satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG5(("satWriteSame10: return Table 62 case 4\n")); return tiSuccess; } return tiSuccess; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satWriteSame10_1. * * SAT implementation for SCSI WRITESANE10 and send FIS request to LL layer. * This is used when WRITESAME10 is divided into multiple ATA commands * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. 
* \param satIOContext_t: Pointer to the SAT IO Context * \param lba: LBA * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satWriteSame10_1( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext, bit32 lba ) { /* sends SAT_WRITE_DMA_EXT */ bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; bit8 lba1, lba2 ,lba3, lba4; TI_DBG5(("satWriteSame10_1 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis = satIOContext->pFis; /* MSB */ lba1 = (bit8)((lba & 0xFF000000) >> (8*3)); lba2 = (bit8)((lba & 0x00FF0000) >> (8*2)); lba3 = (bit8)((lba & 0x0000FF00) >> (8*1)); /* LSB */ lba4 = (bit8)(lba & 0x000000FF); /* SAT_WRITE_DMA_EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x35 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = lba4; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = lba3; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = lba2; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = lba1; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ /* one sector at a time */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satWriteSame10CB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satWriteSame10_1 return status %d\n", status)); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satWriteSame10_2. * * SAT implementation for SCSI WRITESANE10 and send FIS request to LL layer. * This is used when WRITESAME10 is divided into multiple ATA commands * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * \param lba: LBA * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/ /*****************************************************************************/ GLOBAL bit32 satWriteSame10_2( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext, bit32 lba ) { /* sends SAT_WRITE_SECTORS_EXT */ bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; bit8 lba1, lba2 ,lba3, lba4; TI_DBG5(("satWriteSame10_2 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis = satIOContext->pFis; /* MSB */ lba1 = (bit8)((lba & 0xFF000000) >> (8*3)); lba2 = (bit8)((lba & 0x00FF0000) >> (8*2)); lba3 = (bit8)((lba & 0x0000FF00) >> (8*1)); /* LSB */ lba4 = (bit8)(lba & 0x000000FF); /* SAT_WRITE_SECTORS_EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = lba4; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = lba3; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = lba2; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = lba1; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ /* one sector at a time */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satWriteSame10CB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satWriteSame10_2 return status %d\n", status)); return status; } /*****************************************************************************/ /*! 
\brief SAT implementation for SCSI satWriteSame10_3. * * SAT implementation for SCSI WRITESANE10 and send FIS request to LL layer. * This is used when WRITESAME10 is divided into multiple ATA commands * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * \param lba: LBA * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satWriteSame10_3( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext, bit32 lba ) { /* sends SAT_WRITE_FPDMA_QUEUED */ bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; bit8 lba1, lba2 ,lba3, lba4; TI_DBG5(("satWriteSame10_3 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis = satIOContext->pFis; /* MSB */ lba1 = (bit8)((lba & 0xFF000000) >> (8*3)); lba2 = (bit8)((lba & 0x00FF0000) >> (8*2)); lba3 = (bit8)((lba & 0x0000FF00) >> (8*1)); /* LSB */ lba4 = (bit8)(lba & 0x000000FF); /* SAT_WRITE_FPDMA_QUEUED */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ /* one sector at a time */ fis->h.features = 1; /* FIS sector count (7:0) */ fis->d.featuresExp = 0; /* FIS sector count (15:8) */ fis->d.lbaLow = lba4; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = lba3; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = lba2; /* FIS LBA (23:16) */ /* NO FUA bit in the WRITE SAME 10 */ fis->d.device = 
0x40; /* FIS FUA clear */ fis->d.lbaLowExp = lba1; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satWriteSame10CB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satWriteSame10_2 return status %d\n", status)); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satWriteSame16. * * SAT implementation for SCSI satWriteSame16. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/ /*****************************************************************************/ GLOBAL bit32 satWriteSame16( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { scsiRspSense_t *pSense; pSense = satIOContext->pSense; TI_DBG5(("satWriteSame16:start\n")); satSetSensePayload( pSense, SCSI_SNSKEY_NO_SENSE, 0, SCSI_SNSCODE_NO_ADDITIONAL_INFO, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, /* == &satIntIo->satOrgTiIORequest */ tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG5(("satWriteSame16: return internal checking\n")); return tiSuccess; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satLogSense_1. * * Part of SAT implementation for SCSI satLogSense. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/ /*****************************************************************************/ GLOBAL bit32 satLogSense_1( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; agsaFisRegHostToDevice_t *fis; pSatDevData = satIOContext->pSatDevData; fis = satIOContext->pFis; TI_DBG5(("satLogSense_1: start\n")); /* SAT Rev 8, 10.2.4 p74 */ if ( pSatDevData->sat48BitSupport == agTRUE ) { TI_DBG5(("satLogSense_1: case 2-1 sends READ LOG EXT\n")); /* sends READ LOG EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_LOG_EXT; /* 0x2F */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0x07; /* 0x07 */ fis->d.lbaMid = 0; /* */ fis->d.lbaHigh = 0; /* */ fis->d.device = 0; /* */ fis->d.lbaLowExp = 0; /* */ fis->d.lbaMidExp = 0; /* */ fis->d.lbaHighExp = 0; /* */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0x01; /* 1 sector counts */ fis->d.sectorCountExp = 0x00; /* 1 sector counts */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satLogSenseCB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } else { TI_DBG5(("satLogSense_1: case 2-2 sends SMART READ LOG\n")); /* sends SMART READ LOG */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_SMART_READ_LOG; /* 0x2F */ fis->h.features = 0x00; /* 0xd5 */ fis->d.lbaLow = 0x06; /* 0x06 */ fis->d.lbaMid = 0x00; /* 0x4f */ fis->d.lbaHigh = 0x00; /* 0xc2 */ fis->d.device = 0; /* */ fis->d.lbaLowExp = 0; /* */ fis->d.lbaMidExp = 0; /* */ fis->d.lbaHighExp = 0; /* */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0x01; /* */ fis->d.sectorCountExp = 0x00; /* */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satLogSenseCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satSMARTEnable. * * Part of SAT implementation for SCSI satLogSense. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/ /*****************************************************************************/ GLOBAL bit32 satSMARTEnable( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; TI_DBG4(("satSMARTEnable entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis = satIOContext->pFis; /* * Send the SAT_SMART_ENABLE_OPERATIONS command. */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_SMART_ENABLE_OPERATIONS; /* 0xB0 */ fis->h.features = 0xD8; fis->d.lbaLow = 0; fis->d.lbaMid = 0x4F; fis->d.lbaHigh = 0xC2; fis->d.device = 0; fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satSMARTEnableCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satLogSense_3. * * Part of SAT implementation for SCSI satLogSense. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. 
* - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satLogSense_3( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; TI_DBG4(("satLogSense_3 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis = satIOContext->pFis; /* sends READ LOG EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_SMART_READ_LOG; /* 0x2F */ fis->h.features = 0xD5; /* 0xd5 */ fis->d.lbaLow = 0x06; /* 0x06 */ fis->d.lbaMid = 0x4F; /* 0x4f */ fis->d.lbaHigh = 0xC2; /* 0xc2 */ fis->d.device = 0; /* */ fis->d.lbaLowExp = 0; /* */ fis->d.lbaMidExp = 0; /* */ fis->d.lbaHighExp = 0; /* */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0x01; /* 1 sector counts */ fis->d.sectorCountExp = 0x00; /* 1 sector counts */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satLogSenseCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satLogSense_2. * * Part of SAT implementation for SCSI satLogSense. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. 
* \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satLogSense_2( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; TI_DBG4(("satLogSense_2 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis = satIOContext->pFis; /* sends READ LOG EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_LOG_EXT; /* 0x2F */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0x07; /* 0x07 */ fis->d.lbaMid = 0; /* */ fis->d.lbaHigh = 0; /* */ fis->d.device = 0; /* */ fis->d.lbaLowExp = 0; /* */ fis->d.lbaMidExp = 0; /* */ fis->d.lbaHighExp = 0; /* */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0x01; /* 1 sector counts */ fis->d.sectorCountExp = 0x00; /* 1 sector counts */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satLogSenseCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satLogSenseAllocate. * * Part of SAT implementation for SCSI satLogSense. 
* * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * \param payloadSize: size of payload to be allocated. * \param flag: flag value * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. * \note * - flag values: LOG_SENSE_0, LOG_SENSE_1, LOG_SENSE_2 */ /*****************************************************************************/ GLOBAL bit32 satLogSenseAllocate( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext, bit32 payloadSize, bit32 flag ) { satDeviceData_t *pSatDevData; tdIORequestBody_t *tdIORequestBody; satInternalIo_t *satIntIo = agNULL; satIOContext_t *satIOContext2; bit32 status; TI_DBG4(("satLogSense_2 entry: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); pSatDevData = satIOContext->pSatDevData; /* create internal satIOContext */ satIntIo = satAllocIntIoResource( tiRoot, tiIORequest, /* original request */ pSatDevData, payloadSize, satIntIo); if (satIntIo == agNULL) { /* memory allocation failure */ satFreeIntIoResource( tiRoot, pSatDevData, satIntIo); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOFailed, tiDetailOtherError, agNULL, satIOContext->interruptContext ); TI_DBG4(("satLogSense_2: fail in allocation\n")); return tiSuccess; } /* end of memory allocation failure */ satIntIo->satOrgTiIORequest = tiIORequest; tdIORequestBody = (tdIORequestBody_t *)satIntIo->satIntRequestBody; satIOContext2 = &(tdIORequestBody->transport.SATA.satIOContext); satIOContext2->pSatDevData = 
pSatDevData; satIOContext2->pFis = &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev); satIOContext2->pScsiCmnd = &(satIntIo->satIntTiScsiXchg.scsiCmnd); satIOContext2->pSense = &(tdIORequestBody->transport.SATA.sensePayload); satIOContext2->pTiSenseData = &(tdIORequestBody->transport.SATA.tiSenseData); satIOContext2->pTiSenseData->senseData = satIOContext2->pSense; satIOContext2->tiRequestBody = satIntIo->satIntRequestBody; satIOContext2->interruptContext = satIOContext->interruptContext; satIOContext2->satIntIoContext = satIntIo; satIOContext2->ptiDeviceHandle = tiDeviceHandle; satIOContext2->satOrgIOContext = satIOContext; if (flag == LOG_SENSE_0) { /* SAT_SMART_ENABLE_OPERATIONS */ status = satSMARTEnable( tiRoot, &(satIntIo->satIntTiIORequest), tiDeviceHandle, &(satIntIo->satIntTiScsiXchg), satIOContext2); } else if (flag == LOG_SENSE_1) { /* SAT_READ_LOG_EXT */ status = satLogSense_2( tiRoot, &(satIntIo->satIntTiIORequest), tiDeviceHandle, &(satIntIo->satIntTiScsiXchg), satIOContext2); } else { /* SAT_SMART_READ_LOG */ /* SAT_READ_LOG_EXT */ status = satLogSense_3( tiRoot, &(satIntIo->satIntTiIORequest), tiDeviceHandle, &(satIntIo->satIntTiScsiXchg), satIOContext2); } if (status != tiSuccess) { satFreeIntIoResource( tiRoot, pSatDevData, satIntIo); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOFailed, tiDetailOtherError, agNULL, satIOContext->interruptContext ); return tiSuccess; } return tiSuccess; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satLogSense. * * SAT implementation for SCSI satLogSense. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. 
* \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satLogSense( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit8 *pLogPage; /* Log Page data buffer */ bit32 flag = 0; bit16 AllocLen = 0; /* allocation length */ bit8 AllLogPages[8]; bit16 lenRead = 0; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; pLogPage = (bit8 *) tiScsiRequest->sglVirtualAddr; TI_DBG5(("satLogSense: start\n")); osti_memset(&AllLogPages, 0, 8); /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG2(("satLogSense: return control\n")); return tiSuccess; } AllocLen = (bit8)((scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8]); /* checking PC (Page Control) */ /* nothing */ /* special cases */ if (AllocLen == 4) { TI_DBG1(("satLogSense: AllocLen is 4\n")); switch (scsiCmnd->cdb[2] & SCSI_LOG_SENSE_PAGE_CODE_MASK) { case LOGSENSE_SUPPORTED_LOG_PAGES: TI_DBG5(("satLogSense: case LOGSENSE_SUPPORTED_LOG_PAGES\n")); /* SAT Rev 8, 10.2.5 p76 */ if (pSatDevData->satSMARTFeatureSet == agTRUE) { /* add informational 
exception log */ flag = 1; if (pSatDevData->satSMARTSelfTest == agTRUE) { /* add Self-Test results log page */ flag = 2; } } else { /* only supported, no informational exception log, no Self-Test results log page */ flag = 0; } lenRead = 4; AllLogPages[0] = LOGSENSE_SUPPORTED_LOG_PAGES; /* page code */ AllLogPages[1] = 0; /* reserved */ switch (flag) { case 0: /* only supported */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = 1; /* page length */ break; case 1: /* supported and informational exception log */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = 2; /* page length */ break; case 2: /* supported and informational exception log */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = 3; /* page length */ break; default: TI_DBG1(("satLogSense: error unallowed flag value %d\n", flag)); break; } osti_memcpy(pLogPage, &AllLogPages, lenRead); break; case LOGSENSE_SELFTEST_RESULTS_PAGE: TI_DBG5(("satLogSense: case LOGSENSE_SUPPORTED_LOG_PAGES\n")); lenRead = 4; AllLogPages[0] = LOGSENSE_SELFTEST_RESULTS_PAGE; /* page code */ AllLogPages[1] = 0; /* reserved */ /* page length = SELFTEST_RESULTS_LOG_PAGE_LENGTH - 1 - 3 = 400 = 0x190 */ AllLogPages[2] = 0x01; AllLogPages[3] = 0x90; /* page length */ osti_memcpy(pLogPage, &AllLogPages, lenRead); break; case LOGSENSE_INFORMATION_EXCEPTIONS_PAGE: TI_DBG5(("satLogSense: case LOGSENSE_SUPPORTED_LOG_PAGES\n")); lenRead = 4; AllLogPages[0] = LOGSENSE_INFORMATION_EXCEPTIONS_PAGE; /* page code */ AllLogPages[1] = 0; /* reserved */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = INFORMATION_EXCEPTIONS_LOG_PAGE_LENGTH - 1 - 3; /* page length */ osti_memcpy(pLogPage, &AllLogPages, lenRead); break; default: TI_DBG1(("satLogSense: default Page Code 0x%x\n", scsiCmnd->cdb[2] & SCSI_LOG_SENSE_PAGE_CODE_MASK)); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, 
satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_GOOD, agNULL, satIOContext->interruptContext); return tiSuccess; } /* if */ /* SAT rev8 Table 11 p30*/ /* checking Page Code */ switch (scsiCmnd->cdb[2] & SCSI_LOG_SENSE_PAGE_CODE_MASK) { case LOGSENSE_SUPPORTED_LOG_PAGES: TI_DBG5(("satLogSense: case 1\n")); /* SAT Rev 8, 10.2.5 p76 */ if (pSatDevData->satSMARTFeatureSet == agTRUE) { /* add informational exception log */ flag = 1; if (pSatDevData->satSMARTSelfTest == agTRUE) { /* add Self-Test results log page */ flag = 2; } } else { /* only supported, no informational exception log, no Self-Test results log page */ flag = 0; } AllLogPages[0] = 0; /* page code */ AllLogPages[1] = 0; /* reserved */ switch (flag) { case 0: /* only supported */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = 1; /* page length */ AllLogPages[4] = 0x00; /* supported page list */ lenRead = (bit8)(MIN(AllocLen, 5)); break; case 1: /* supported and informational exception log */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = 2; /* page length */ AllLogPages[4] = 0x00; /* supported page list */ AllLogPages[5] = 0x10; /* supported page list */ lenRead = (bit8)(MIN(AllocLen, 6)); break; case 2: /* supported and informational exception log */ AllLogPages[2] = 0; /* page length */ AllLogPages[3] = 3; /* page length */ AllLogPages[4] = 0x00; /* supported page list */ AllLogPages[5] = 0x10; /* supported page list */ AllLogPages[6] = 0x2F; /* supported page list */ lenRead = (bit8)(MIN(AllocLen, 7)); break; default: TI_DBG1(("satLogSense: error unallowed flag value %d\n", flag)); break; } osti_memcpy(pLogPage, &AllLogPages, lenRead); /* comparing allocation length to Log Page byte size */ /* SPC-4, 4.3.4.6, p28 */ if (AllocLen > lenRead ) { TI_DBG1(("satLogSense reporting underrun lenRead=0x%x AllocLen=0x%x tiIORequest=%p\n", lenRead, AllocLen, tiIORequest)); 
ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOUnderRun, AllocLen - lenRead, agNULL, satIOContext->interruptContext ); } else { ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_GOOD, agNULL, satIOContext->interruptContext); } break; case LOGSENSE_SELFTEST_RESULTS_PAGE: TI_DBG5(("satLogSense: case 2\n")); /* checking SMART self-test */ if (pSatDevData->satSMARTSelfTest == agFALSE) { TI_DBG5(("satLogSense: case 2 no SMART Self Test\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); } else { /* if satSMARTEnabled is false, send SMART_ENABLE_OPERATIONS */ if (pSatDevData->satSMARTEnabled == agFALSE) { TI_DBG5(("satLogSense: case 2 calling satSMARTEnable\n")); status = satLogSenseAllocate(tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext, 0, LOG_SENSE_0 ); return status; } else { /* SAT Rev 8, 10.2.4 p74 */ if ( pSatDevData->sat48BitSupport == agTRUE ) { TI_DBG5(("satLogSense: case 2-1 sends READ LOG EXT\n")); status = satLogSenseAllocate(tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext, 512, LOG_SENSE_1 ); return status; } else { TI_DBG5(("satLogSense: case 2-2 sends SMART READ LOG\n")); status = satLogSenseAllocate(tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext, 512, LOG_SENSE_2 ); return status; } } } break; case LOGSENSE_INFORMATION_EXCEPTIONS_PAGE: TI_DBG5(("satLogSense: case 3\n")); /* checking SMART feature set */ if (pSatDevData->satSMARTFeatureSet == agFALSE) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); } else { /* checking SMART feature enabled */ if 
(pSatDevData->satSMARTEnabled == agFALSE) { satSetSensePayload( pSense, SCSI_SNSKEY_ABORTED_COMMAND, 0, SCSI_SNSCODE_ATA_DEVICE_FEATURE_NOT_ENABLED, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); } else { /* SAT Rev 8, 10.2.3 p72 */ TI_DBG5(("satLogSense: case 3 sends SMART RETURN STATUS\n")); /* sends SMART RETURN STATUS */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_SMART_RETURN_STATUS;/* 0xB0 */ fis->h.features = 0xDA; /* FIS features */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMid = 0x4F; /* FIS LBA (15:8 ) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHigh = 0xC2; /* FIS LBA (23:16) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.device = 0; /* FIS DEV is discared in SATA */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved4 = 0; fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satLogSenseCB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return status; } } break; default: TI_DBG1(("satLogSense: default Page Code 0x%x\n", scsiCmnd->cdb[2] & SCSI_LOG_SENSE_PAGE_CODE_MASK)); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); break; } /* end switch */ return tiSuccess; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satModeSelect6. * * SAT implementation for SCSI satModeSelect6. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/
/*****************************************************************************/
GLOBAL bit32 satModeSelect6(
                            tiRoot_t                  *tiRoot,
                            tiIORequest_t             *tiIORequest,
                            tiDeviceHandle_t          *tiDeviceHandle,
                            tiScsiInitiatorRequest_t  *tiScsiRequest,
                            satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit8                      *pLogPage;          /* MODE SELECT parameter list */
  bit32                     StartingIndex = 0;  /* offset of the mode page in pLogPage */
  bit8                      PageCode = 0;
  bit32                     chkCnd = agFALSE;

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;
  pLogPage    = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG5(("satModeSelect6: start\n"));

  /* checking CONTROL byte: NACA == 1 or LINK == 1 is not supported */
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satModeSelect6: return control\n"));
    return tiSuccess;
  }

  /* checking PF bit; only page-format parameter lists are supported */
  if ( !(scsiCmnd->cdb[1] & SCSI_MODE_SELECT6_PF_MASK))
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satModeSelect6: PF bit check \n"));
    return tiSuccess;
  }

  /* checking Block Descriptor Length on Mode parameter header(6) */
  if (pLogPage[3] == 8)
  {
    /* mode parameter block descriptor exists; page starts at 4 + 8 */
    PageCode = (bit8)(pLogPage[12] & 0x3F);
    StartingIndex = 12;
  }
  else if (pLogPage[3] == 0)
  {
    /* mode parameter block descriptor does not exist; page starts at 4 + 0 */
    PageCode = (bit8)(pLogPage[4] & 0x3F);
    StartingIndex = 4;
    /* NOTE(review): original code accepts this case immediately with GOOD
       status and never processes the mode page — preserved as-is; confirm
       this is intentional */
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
    return tiSuccess;
  }
  else
  {
    TI_DBG1(("satModeSelect6: return mode parameter block descriptor 0x%x\n", pLogPage[3]));
    /* no more than one mode parameter block descriptor shall be supported */
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_NO_SENSE,
                        0,
                        SCSI_SNSCODE_NO_ADDITIONAL_INFO,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  switch (PageCode) /* page code */
  {
  case MODESELECT_CONTROL_PAGE:
    TI_DBG1(("satModeSelect6: Control mode page\n"));
    /*
      compare pLogPage to expected value (SAT Table 65, p67)
      If not match, return check condition
     */
    if ( pLogPage[StartingIndex+1] != 0x0A ||
         pLogPage[StartingIndex+2] != 0x02 ||
         (pSatDevData->satNCQ == agTRUE && pLogPage[StartingIndex+3] != 0x12) ||
         (pSatDevData->satNCQ == agFALSE && pLogPage[StartingIndex+3] != 0x02) ||
         (pLogPage[StartingIndex+4] & BIT3_MASK) != 0x00 || /* SWP bit */
         (pLogPage[StartingIndex+4] & BIT4_MASK) != 0x00 || /* UA_INTLCK_CTRL */
         (pLogPage[StartingIndex+4] & BIT5_MASK) != 0x00 || /* UA_INTLCK_CTRL */

         (pLogPage[StartingIndex+5] & BIT0_MASK) != 0x00 || /* AUTOLOAD MODE */
         (pLogPage[StartingIndex+5] & BIT1_MASK) != 0x00 || /* AUTOLOAD MODE */
         (pLogPage[StartingIndex+5] & BIT2_MASK) != 0x00 || /* AUTOLOAD MODE */
         (pLogPage[StartingIndex+5] & BIT6_MASK) != 0x00 || /* TAS bit */

         pLogPage[StartingIndex+8] != 0xFF ||
         pLogPage[StartingIndex+9] != 0xFF ||
         pLogPage[StartingIndex+10] != 0x00 ||
         pLogPage[StartingIndex+11] != 0x00
       )
    {
      chkCnd = agTRUE;
    }

    if (chkCnd == agTRUE)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      /* fixed: debug message previously said "satModeSelect10" (copy-paste) */
      TI_DBG1(("satModeSelect6: unexpected values\n"));
    }
    else
    {
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
    }
    return tiSuccess;
    break;

  case MODESELECT_READ_WRITE_ERROR_RECOVERY_PAGE:
    TI_DBG1(("satModeSelect6: Read-Write Error Recovery mode page\n"));
    /* only the default (all-zero) settings are accepted */
    if ( (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_AWRE_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_RC_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_EER_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_PER_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_DTE_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_DCR_MASK) ||
         (pLogPage[StartingIndex + 10]) ||
         (pLogPage[StartingIndex + 11])
       )
    {
      TI_DBG5(("satModeSelect6: return check condition \n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    else
    {
      TI_DBG5(("satModeSelect6: return GOOD \n"));
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
      return tiSuccess;
    }
    break;

  case MODESELECT_CACHING:
    /* SAT rev8 Table67, p69 */
    TI_DBG5(("satModeSelect6: Caching mode page\n"));
    /* all fields other than WCE and DRA must be zero */
    if ( (pLogPage[StartingIndex + 2] & 0xFB) || /* 1111 1011 */
         (pLogPage[StartingIndex + 3]) ||
         (pLogPage[StartingIndex + 4]) ||
         (pLogPage[StartingIndex + 5]) ||
         (pLogPage[StartingIndex + 6]) ||
         (pLogPage[StartingIndex + 7]) ||
         (pLogPage[StartingIndex + 8]) ||
         (pLogPage[StartingIndex + 9]) ||
         (pLogPage[StartingIndex + 10]) ||
         (pLogPage[StartingIndex + 11]) ||
         (pLogPage[StartingIndex + 12] & 0xC1) || /* 1100 0001 */
         (pLogPage[StartingIndex + 13]) ||
         (pLogPage[StartingIndex + 14]) ||
         (pLogPage[StartingIndex + 15])
       )
    {
      TI_DBG1(("satModeSelect6: return check condition \n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    else
    {
      /* sends ATA SET FEATURES; only the features byte depends on the WCE bit */
      if ( !(pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_WCE_MASK) )
      {
        TI_DBG5(("satModeSelect6: disable write cache\n"));
        fis->h.features       = 0x82;                   /* disable write cache */
      }
      else
      {
        TI_DBG5(("satModeSelect6: enable write cache\n"));
        fis->h.features       = 0x02;                   /* enable write cache */
      }
      fis->h.fisType          = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort         = 0x80;                   /* C Bit is set */
      fis->h.command          = SAT_SET_FEATURES;       /* 0xEF */
      fis->d.lbaLow           = 0;
      fis->d.lbaMid           = 0;
      fis->d.lbaHigh          = 0;
      fis->d.device           = 0;
      fis->d.lbaLowExp        = 0;
      fis->d.lbaMidExp        = 0;
      fis->d.lbaHighExp       = 0;
      fis->d.featuresExp      = 0;
      fis->d.sectorCount      = 0;
      fis->d.sectorCountExp   = 0;
      fis->d.reserved4        = 0;
      fis->d.control          = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5        = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satModeSelect6n10CB;

      /* Prepare SGL and send FIS to LL layer. */
      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      return status;
    }
    break;

  case MODESELECT_INFORMATION_EXCEPTION_CONTROL_PAGE:
    TI_DBG5(("satModeSelect6: Informational Exception Control mode page\n"));
    if ( (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_PERF_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT6_TEST_MASK)
       )
    {
      TI_DBG1(("satModeSelect6: return check condition \n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    else
    {
      /* sends ATA SMART ENABLE/DISABLE OPERATIONS based on DEXCPT bit (0x08) */
      if ( !(pLogPage[StartingIndex + 2] & 0x08) )
      {
        TI_DBG5(("satModeSelect6: enable information exceptions reporting\n"));
        fis->h.command        = SAT_SMART_ENABLE_OPERATIONS;  /* 0xB0 */
        fis->h.features       = 0xD8;                         /* enable */
      }
      else
      {
        TI_DBG5(("satModeSelect6: disable information exceptions reporting\n"));
        fis->h.command        = SAT_SMART_DISABLE_OPERATIONS; /* 0xB0 */
        fis->h.features       = 0xD9;                         /* disable */
      }
      fis->h.fisType          = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort         = 0x80;                   /* C Bit is set */
      fis->d.lbaLow           = 0;
      fis->d.lbaMid           = 0x4F;                   /* SMART signature */
      fis->d.lbaHigh          = 0xC2;                   /* SMART signature */
      fis->d.device           = 0;
      fis->d.lbaLowExp        = 0;
      fis->d.lbaMidExp        = 0;
      fis->d.lbaHighExp       = 0;
      fis->d.featuresExp      = 0;
      fis->d.sectorCount      = 0;
      fis->d.sectorCountExp   = 0;
      fis->d.reserved4        = 0;
      fis->d.control          = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5        = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satModeSelect6n10CB;

      /* Prepare SGL and send FIS to LL layer. */
      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      return status;
    }
    break;

  default:
    /* fixed: log the parsed PageCode, not pLogPage[12] which is only valid
       when a block descriptor is present */
    TI_DBG1(("satModeSelect6: Error unknown page code 0x%x\n", PageCode));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_NO_SENSE,
                        0,
                        SCSI_SNSCODE_NO_ADDITIONAL_INFO,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satModeSelect6n10_1.
 *
 *  This function is part of implementation of ModeSelect6 and ModeSelect10.
 *  When ModeSelect6 or ModeSelect10 is converted into multiple ATA commands,
 *  this function is used.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satModeSelect6n10_1(
                                 tiRoot_t                  *tiRoot,
                                 tiIORequest_t             *tiIORequest,
                                 tiDeviceHandle_t          *tiDeviceHandle,
                                 tiScsiInitiatorRequest_t  *tiScsiRequest,
                                 satIOContext_t            *satIOContext)
{
  /* sends ATA SET FEATURES based on the DRA bit of the Caching mode page */
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t  *fis;
  bit8                      *pLogPage;          /* MODE SELECT parameter list */
  bit32                     StartingIndex = 0;  /* offset of the mode page */

  fis      = satIOContext->pFis;
  pLogPage = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG5(("satModeSelect6_1: start\n"));

  /* checking Block Descriptor Length on Mode parameter header(6) */
  if (pLogPage[3] == 8)
  {
    /* mode parameter block descriptor exists */
    StartingIndex = 12;
  }
  else
  {
    /* mode parameter block descriptor does not exist */
    StartingIndex = 4;
  }

  /*
   * Sends ATA SET FEATURES based on the DRA bit; the two cases differ only
   * in the features byte, so the FIS is built once (previously the whole
   * FIS setup was duplicated in both branches).
   */
  if ( !(pLogPage[StartingIndex + 12] & SCSI_MODE_SELECT6_DRA_MASK) )
  {
    TI_DBG5(("satModeSelect6_1: enable read look-ahead feature\n"));
    fis->h.features       = 0xAA;                   /* enable read look-ahead */
  }
  else
  {
    TI_DBG5(("satModeSelect6_1: disable read look-ahead feature\n"));
    fis->h.features       = 0x55;                   /* disable read look-ahead */
  }

  /* sends SET FEATURES */
  fis->h.fisType          = 0x27;                   /* Reg host to device */
  fis->h.c_pmPort         = 0x80;                   /* C Bit is set */
  fis->h.command          = SAT_SET_FEATURES;       /* 0xEF */
  fis->d.lbaLow           = 0;
  fis->d.lbaMid           = 0;
  fis->d.lbaHigh          = 0;
  fis->d.device           = 0;
  fis->d.lbaLowExp        = 0;
  fis->d.lbaMidExp        = 0;
  fis->d.lbaHighExp       = 0;
  fis->d.featuresExp      = 0;
  fis->d.sectorCount      = 0;
  fis->d.sectorCountExp   = 0;
  fis->d.reserved4        = 0;
  fis->d.control          = 0;                      /* FIS HOB bit clear */
  fis->d.reserved5        = 0;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satModeSelect6n10CB;

  /* Prepare SGL and send FIS to LL layer. */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satModeSelect10.
 *
 *  SAT implementation for SCSI satModeSelect10.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satModeSelect10(
                             tiRoot_t                  *tiRoot,
                             tiIORequest_t             *tiIORequest,
                             tiDeviceHandle_t          *tiDeviceHandle,
                             tiScsiInitiatorRequest_t  *tiScsiRequest,
                             satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit8                      *pLogPage;          /* MODE SELECT parameter list */
  bit16                     BlkDescLen = 0;     /* Block Descriptor Length */
  bit32                     StartingIndex = 0;  /* offset of the mode page */
  bit8                      PageCode = 0;
  bit32                     chkCnd = agFALSE;

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;
  pLogPage    = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG5(("satModeSelect10: start\n"));

  /* checking CONTROL byte: NACA == 1 or LINK == 1 is not supported */
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satModeSelect10: return control\n"));
    return tiSuccess;
  }

  /* checking PF bit; only page-format parameter lists are supported */
  if ( !(scsiCmnd->cdb[1] & SCSI_MODE_SELECT10_PF_MASK))
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satModeSelect10: PF bit check \n"));
    return tiSuccess;
  }

  /* fixed: cast through (bit16), not (bit8) — the (bit8) cast silently
     discarded the high byte of the 16-bit Block Descriptor Length */
  BlkDescLen = (bit16)((pLogPage[6] << 8) + pLogPage[7]);

  /* checking Block Descriptor Length on Mode parameter header(10) and LONGLBA bit */
  if ( (BlkDescLen == 8) && !(pLogPage[4] & SCSI_MODE_SELECT10_LONGLBA_MASK) )
  {
    /* mode parameter block descriptor exists and length is 8 byte; page at 8 + 8 */
    PageCode = (bit8)(pLogPage[16] & 0x3F);
    StartingIndex = 16;
  }
  else if ( (BlkDescLen == 16) && (pLogPage[4] & SCSI_MODE_SELECT10_LONGLBA_MASK) )
  {
    /* mode parameter block descriptor exists and length is 16 byte; page at 8 + 16 */
    PageCode = (bit8)(pLogPage[24] & 0x3F);
    StartingIndex = 24;
  }
  else if (BlkDescLen == 0)
  {
    /* mode parameter block descriptor does not exist; page at 8 + 0 */
    PageCode = (bit8)(pLogPage[8] & 0x3F);
    StartingIndex = 8;
    /* NOTE(review): original code accepts this case immediately with GOOD
       status and never processes the mode page — preserved as-is; confirm
       this is intentional */
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
    return tiSuccess;
  }
  else
  {
    TI_DBG1(("satModeSelect10: return mode parameter block descriptor 0x%x\n", BlkDescLen));
    /* no more than one mode parameter block descriptor shall be supported */
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_NO_SENSE,
                        0,
                        SCSI_SNSCODE_NO_ADDITIONAL_INFO,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }

  /* for debugging only */
  if (StartingIndex == 8)
  {
    tdhexdump("startingindex 8", (bit8 *)pLogPage, 8);
  }
  else if(StartingIndex == 16)
  {
    if (PageCode == MODESELECT_CACHING)
    {
      tdhexdump("startingindex 16", (bit8 *)pLogPage, 16+20);
    }
    else
    {
      tdhexdump("startingindex 16", (bit8 *)pLogPage, 16+12);
    }
  }
  else
  {
    if (PageCode == MODESELECT_CACHING)
    {
      tdhexdump("startingindex 24", (bit8 *)pLogPage, 24+20);
    }
    else
    {
      tdhexdump("startingindex 24", (bit8 *)pLogPage, 24+12);
    }
  }

  switch (PageCode) /* page code */
  {
  case MODESELECT_CONTROL_PAGE:
    TI_DBG5(("satModeSelect10: Control mode page\n"));
    /*
      compare pLogPage to expected value (SAT Table 65, p67)
      If not match, return check condition
     */
    if ( pLogPage[StartingIndex+1] != 0x0A ||
         pLogPage[StartingIndex+2] != 0x02 ||
         (pSatDevData->satNCQ == agTRUE && pLogPage[StartingIndex+3] != 0x12) ||
         (pSatDevData->satNCQ == agFALSE && pLogPage[StartingIndex+3] != 0x02) ||
         (pLogPage[StartingIndex+4] & BIT3_MASK) != 0x00 || /* SWP bit */
         (pLogPage[StartingIndex+4] & BIT4_MASK) != 0x00 || /* UA_INTLCK_CTRL */
         (pLogPage[StartingIndex+4] & BIT5_MASK) != 0x00 || /* UA_INTLCK_CTRL */

         (pLogPage[StartingIndex+5] & BIT0_MASK) != 0x00 || /* AUTOLOAD MODE */
         (pLogPage[StartingIndex+5] & BIT1_MASK) != 0x00 || /* AUTOLOAD MODE */
         (pLogPage[StartingIndex+5] & BIT2_MASK) != 0x00 || /* AUTOLOAD MODE */
         (pLogPage[StartingIndex+5] & BIT6_MASK) != 0x00 || /* TAS bit */

         pLogPage[StartingIndex+8] != 0xFF ||
         pLogPage[StartingIndex+9] != 0xFF ||
         pLogPage[StartingIndex+10] != 0x00 ||
         pLogPage[StartingIndex+11] != 0x00
       )
    {
      chkCnd = agTRUE;
    }

    if (chkCnd == agTRUE)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      TI_DBG1(("satModeSelect10: unexpected values\n"));
    }
    else
    {
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
    }
    return tiSuccess;
    break;

  case MODESELECT_READ_WRITE_ERROR_RECOVERY_PAGE:
    TI_DBG5(("satModeSelect10: Read-Write Error Recovery mode page\n"));
    /* only the default (all-zero) settings are accepted */
    if ( (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_AWRE_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_RC_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_EER_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_PER_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_DTE_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_DCR_MASK) ||
         (pLogPage[StartingIndex + 10]) ||
         (pLogPage[StartingIndex + 11])
       )
    {
      TI_DBG1(("satModeSelect10: return check condition \n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    else
    {
      TI_DBG2(("satModeSelect10: return GOOD \n"));
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
      return tiSuccess;
    }
    break;

  case MODESELECT_CACHING:
    /* SAT rev8 Table67, p69 */
    TI_DBG5(("satModeSelect10: Caching mode page\n"));
    /* all fields other than WCE and DRA must be zero */
    if ( (pLogPage[StartingIndex + 2] & 0xFB) || /* 1111 1011 */
         (pLogPage[StartingIndex + 3]) ||
         (pLogPage[StartingIndex + 4]) ||
         (pLogPage[StartingIndex + 5]) ||
         (pLogPage[StartingIndex + 6]) ||
         (pLogPage[StartingIndex + 7]) ||
         (pLogPage[StartingIndex + 8]) ||
         (pLogPage[StartingIndex + 9]) ||
         (pLogPage[StartingIndex + 10]) ||
         (pLogPage[StartingIndex + 11]) ||
         (pLogPage[StartingIndex + 12] & 0xC1) || /* 1100 0001 */
         (pLogPage[StartingIndex + 13]) ||
         (pLogPage[StartingIndex + 14]) ||
         (pLogPage[StartingIndex + 15])
       )
    {
      TI_DBG1(("satModeSelect10: return check condition \n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    else
    {
      /* sends ATA SET FEATURES; only the features byte depends on the WCE bit */
      if ( !(pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_WCE_MASK) )
      {
        TI_DBG5(("satModeSelect10: disable write cache\n"));
        fis->h.features       = 0x82;                   /* disable write cache */
      }
      else
      {
        TI_DBG5(("satModeSelect10: enable write cache\n"));
        fis->h.features       = 0x02;                   /* enable write cache */
      }
      fis->h.fisType          = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort         = 0x80;                   /* C Bit is set */
      fis->h.command          = SAT_SET_FEATURES;       /* 0xEF */
      fis->d.lbaLow           = 0;
      fis->d.lbaMid           = 0;
      fis->d.lbaHigh          = 0;
      fis->d.device           = 0;
      fis->d.lbaLowExp        = 0;
      fis->d.lbaMidExp        = 0;
      fis->d.lbaHighExp       = 0;
      fis->d.featuresExp      = 0;
      fis->d.sectorCount      = 0;
      fis->d.sectorCountExp   = 0;
      fis->d.reserved4        = 0;
      fis->d.control          = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5        = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satModeSelect6n10CB;

      /* Prepare SGL and send FIS to LL layer. */
      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      return status;
    }
    break;

  case MODESELECT_INFORMATION_EXCEPTION_CONTROL_PAGE:
    TI_DBG5(("satModeSelect10: Informational Exception Control mode page\n"));
    if ( (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_PERF_MASK) ||
         (pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_TEST_MASK)
       )
    {
      TI_DBG1(("satModeSelect10: return check condition \n"));

      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_PARAMETER_LIST,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }
    else
    {
      /* sends ATA SMART ENABLE/DISABLE OPERATIONS based on DEXCPT bit */
      if ( !(pLogPage[StartingIndex + 2] & SCSI_MODE_SELECT10_DEXCPT_MASK) )
      {
        TI_DBG5(("satModeSelect10: enable information exceptions reporting\n"));
        fis->h.command        = SAT_SMART_ENABLE_OPERATIONS;  /* 0xB0 */
        fis->h.features       = 0xD8;                         /* enable */
      }
      else
      {
        TI_DBG5(("satModeSelect10: disable information exceptions reporting\n"));
        fis->h.command        = SAT_SMART_DISABLE_OPERATIONS; /* 0xB0 */
        fis->h.features       = 0xD9;                         /* disable */
      }
      fis->h.fisType          = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort         = 0x80;                   /* C Bit is set */
      fis->d.lbaLow           = 0;
      fis->d.lbaMid           = 0x4F;                   /* SMART signature */
      fis->d.lbaHigh          = 0xC2;                   /* SMART signature */
      fis->d.device           = 0;
      fis->d.lbaLowExp        = 0;
      fis->d.lbaMidExp        = 0;
      fis->d.lbaHighExp       = 0;
      fis->d.featuresExp      = 0;
      fis->d.sectorCount      = 0;
      fis->d.sectorCountExp   = 0;
      fis->d.reserved4        = 0;
      fis->d.control          = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5        = 0;

      agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

      /* Initialize CB for SATA completion. */
      satIOContext->satCompleteCB = &satModeSelect6n10CB;

      /* Prepare SGL and send FIS to LL layer. */
      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);
      return status;
    }
    break;

  default:
    /* fixed: log the parsed PageCode, not pLogPage[12] which is only valid
       for one of the three possible header layouts */
    TI_DBG1(("satModeSelect10: Error unknown page code 0x%x\n", PageCode));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_NO_SENSE,
                        0,
                        SCSI_SNSCODE_NO_ADDITIONAL_INFO,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );
    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satSynchronizeCache10.
 *
 *  SAT implementation for SCSI satSynchronizeCache10.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
* \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satSynchronizeCache10( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satSynchronizeCache10: start\n")); /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG2(("satSynchronizeCache10: return control\n")); return tiSuccess; } /* checking IMMED bit */ if (scsiCmnd->cdb[1] & SCSI_SYNC_CACHE_IMMED_MASK) { TI_DBG1(("satSynchronizeCache10: GOOD status due to IMMED bit\n")); /* return GOOD status first here */ ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_GOOD, agNULL, satIOContext->interruptContext); } /* sends FLUSH CACHE or FLUSH CACHE EXT */ if (pSatDevData->sat48BitSupport == agTRUE) { TI_DBG5(("satSynchronizeCache10: sends FLUSH CACHE EXT\n")); /* FLUSH CACHE EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ 
fis->h.command = SAT_FLUSH_CACHE_EXT; /* 0xEA */ fis->h.features = 0; /* FIS reserve */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.device = 0; /* FIS DEV is discared in SATA */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved4 = 0; fis->d.reserved5 = 0; } else { TI_DBG5(("satSynchronizeCache10: sends FLUSH CACHE\n")); /* FLUSH CACHE */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_FLUSH_CACHE; /* 0xE7 */ fis->h.features = 0; /* FIS features NA */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.device = 0; /* FIS DEV is discared in SATA */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved4 = 0; fis->d.reserved5 = 0; } agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satSynchronizeCache10n16CB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satSynchronizeCache16. * * SAT implementation for SCSI satSynchronizeCache16. * * \param tiRoot: Pointer to TISA initiator driver/port instance. 
* \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satSynchronizeCache16( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satSynchronizeCache16: start\n")); /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[15] & SCSI_NACA_MASK) || (scsiCmnd->cdb[15] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satSynchronizeCache16: return control\n")); return tiSuccess; } /* checking IMMED bit */ if (scsiCmnd->cdb[1] & SCSI_SYNC_CACHE_IMMED_MASK) { TI_DBG1(("satSynchronizeCache16: GOOD status due to IMMED bit\n")); /* return GOOD status first here */ ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_GOOD, agNULL, satIOContext->interruptContext); } /* sends FLUSH CACHE or FLUSH CACHE EXT */ if (pSatDevData->sat48BitSupport == agTRUE) { TI_DBG5(("satSynchronizeCache16: sends 
FLUSH CACHE EXT\n")); /* FLUSH CACHE EXT */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_FLUSH_CACHE_EXT; /* 0xEA */ fis->h.features = 0; /* FIS reserve */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaLowExp = 0; /* FIS LBA (31:24) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.device = 0; /* FIS DEV is discared in SATA */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved4 = 0; fis->d.reserved5 = 0; } else { TI_DBG5(("satSynchronizeCache16: sends FLUSH CACHE\n")); /* FLUSH CACHE */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_FLUSH_CACHE; /* 0xE7 */ fis->h.features = 0; /* FIS features NA */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.device = 0; /* FIS DEV is discared in SATA */ fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved4 = 0; fis->d.reserved5 = 0; } agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satSynchronizeCache10n16CB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satWriteAndVerify10. 
* * SAT implementation for SCSI satWriteAndVerify10. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satWriteAndVerify10( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* combination of write10 and verify10 */ bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; bit32 LoopNum = 1; bit8 LBA[4]; bit8 TL[4]; bit32 rangeChk = agFALSE; /* lba and tl range check */ pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satWriteAndVerify10: start\n")); /* checking BYTCHK bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE_N_VERIFY_BYTCHK_MASK) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteAndVerify10: BYTCHK bit checking \n")); return tiSuccess; } /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, 
SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteAndVerify10: return control\n")); return tiSuccess; } osti_memset(LBA, 0, sizeof(LBA)); osti_memset(TL, 0, sizeof(TL)); /* do not use memcpy due to indexing in LBA and TL */ LBA[0] = scsiCmnd->cdb[2]; /* MSB */ LBA[1] = scsiCmnd->cdb[3]; LBA[2] = scsiCmnd->cdb[4]; LBA[3] = scsiCmnd->cdb[5]; /* LSB */ TL[0] = 0; TL[1] = 0; TL[2] = scsiCmnd->cdb[7]; /* MSB */ TL[3] = scsiCmnd->cdb[8]; /* LSB */ rangeChk = satAddNComparebit32(LBA, TL); /* cbd10; computing LBA and transfer length */ lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2)) + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5]; tl = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8]; /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (lba > SAT_TR_LBA_LIMIT - 1) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteAndVerify10: return LBA out of range\n")); return tiSuccess; } if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT) { TI_DBG1(("satWrite10: return LBA+TL out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* case 1 and 2 */ if 
(!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* WRITE DMA*/ /* can't fit the transfer length */ TI_DBG5(("satWriteAndVerify10: case 2 !!!\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_DMA; /* 0xCA */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA; } else { /* case 1 */ /* WRITE MULTIPLE or WRITE SECTOR(S) */ /* WRITE SECTORS for easier implemetation */ /* can't fit the transfer length */ TI_DBG5(("satWriteAndVerify10: case 1 !!!\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_SECTORS; /* 0x30 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; 
satIOContext->ATACmd = SAT_WRITE_SECTORS; } } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* WRITE DMA EXT or WRITE DMA FUA EXT */ TI_DBG5(("satWriteAndVerify10: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x35 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[7]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA_EXT; } else { /* case 4 */ /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */ /* WRITE SECTORS EXT for easier implemetation */ TI_DBG5(("satWriteAndVerify10: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ 
fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[7]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* WRITE FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satWriteAndVerify10: case 5 !!! error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG5(("satWriteAndVerify10: case 5\n")); /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ fis->h.features = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE_N_VERIFY10_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = scsiCmnd->cdb[7]; /* FIS sector count (15:8) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED; } 
satIOContext->currentLBA = lba; satIOContext->OrgTL = tl; /* computing number of loop and remainder for tl 0xFF in case not ext 0xFFFF in case EXT */ if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA) { LoopNum = satComputeLoopNum(tl, 0xFF); } else if (fis->h.command == SAT_WRITE_SECTORS_EXT || fis->h.command == SAT_WRITE_DMA_EXT || fis->h.command == SAT_WRITE_DMA_FUA_EXT ) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } else { /* SAT_WRITE_FPDMA_QUEUED */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } satIOContext->LoopNum = LoopNum; if (LoopNum == 1) { TI_DBG5(("satWriteAndVerify10: NON CHAINED data\n")); /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonChainedWriteNVerifyCB; } else { TI_DBG1(("satWriteAndVerify10: CHAINED data\n")); /* re-setting tl */ if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA) { fis->d.sectorCount = 0xFF; } else if (fis->h.command == SAT_WRITE_SECTORS_EXT || fis->h.command == SAT_WRITE_DMA_EXT || fis->h.command == SAT_WRITE_DMA_FUA_EXT ) { fis->d.sectorCount = 0xFF; fis->d.sectorCountExp = 0xFF; } else { /* SAT_WRITE_FPDMA_QUEUED */ fis->h.features = 0xFF; fis->d.featuresExp = 0xFF; } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satChainedWriteNVerifyCB; } /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } #ifdef REMOVED GLOBAL bit32 satWriteAndVerify10( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* combination of write10 and verify10 */ bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satWriteAndVerify10: start\n")); /* checking BYTCHK bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE_N_VERIFY_BYTCHK_MASK) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteAndVerify10: BYTCHK bit checking \n")); return tiSuccess; } /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG2(("satWriteAndVerify10: return control\n")); return tiSuccess; } /* let's do write10 */ if ( pSatDevData->sat48BitSupport != agTRUE ) { /* writeandverify10 but no support for 48 bit addressing -> problem in transfer length(sector count) */ satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( 
                              tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWriteAndVerify10: return internal checking\n"));
    return tiSuccess;
  }

  /* cbd10; computing LBA and transfer length */
  lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2))
      + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];
  tl = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  /* Table 34, 9.1, p 46 */
  /* note: As of 2/10/2006, no support for DMA QUEUED */

  /*
    Table 34, 9.1, p 46, b
    When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1),
    return check condition
  */
  if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE )
  {
    if (lba > SAT_TR_LBA_LIMIT - 1)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satWriteAndVerify10: return LBA out of range\n"));
      return tiSuccess;
    }
  }

  /* case 1 and 2 */
  if (lba + tl <= SAT_TR_LBA_LIMIT)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 2 */
      /* WRITE DMA */
      /* can't fit the transfer length */
      TI_DBG5(("satWriteAndVerify10: case 2 !!!\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_DMA;          /* 0xCA */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (0x4 << 4) | (scsiCmnd->cdb[2] & 0xF);
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
      satIOContext->ATACmd = SAT_WRITE_DMA;
    }
    else
    {
      /* case 1 */
      /* WRITE MULTIPLE or WRITE SECTOR(S) */
      /* WRITE SECTORS for easier implementation */
      /* can't fit the transfer length */
      TI_DBG5(("satWriteAndVerify10: case 1 !!!\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_SECTORS;      /* 0x30 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (0x4 << 4) | (scsiCmnd->cdb[2] & 0xF);
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS;
    }
  }

  /* case 3 and 4 */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 3 */
      /* WRITE DMA EXT or WRITE DMA FUA EXT */
      TI_DBG5(("satWriteAndVerify10: case 3\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */
      fis->h.command        = SAT_WRITE_DMA_EXT;      /* 0x35 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
    }
    else
    {
      /* case 4 */
      /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */
      /* WRITE SECTORS EXT for easier implementation */
      TI_DBG5(("satWriteAndVerify10: case 4\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_WRITE_SECTORS_EXT;  /* 0x34 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
    }
  }

  /* case 5 */
  if (pSatDevData->satNCQ == agTRUE)
  {
    /* WRITE FPDMA QUEUED */
    if (pSatDevData->sat48BitSupport != agTRUE)
    {
      TI_DBG5(("satWriteAndVerify10: case 5 !!! error NCQ but 28 bit address support \n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );
      return tiSuccess;
    }

    TI_DBG5(("satWriteAndVerify10: case 5\n"));

    /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */
    fis->h.features       = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
    fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */

    /* Check FUA bit */
    if (scsiCmnd->cdb[1] & SCSI_WRITE_N_VERIFY10_FUA_MASK)
      fis->d.device       = 0xC0;                   /* FIS FUA set */
    else
      fis->d.device       = 0x40;                   /* FIS FUA clear */

    fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.featuresExp    = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
    fis->d.sectorCount    = 0;                      /* Tag (7:3) set by LL layer */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE;
  }

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satWriteAndVerify10CB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);
  return (status);
}
#endif /* REMOVED */

#ifdef REMOVED
/*****************************************************************************/
/*! \brief SAT implementation for SCSI satWriteAndVerify10_1.
 *
 *  SAT implementation for SCSI satWriteAndVerify10_1.
 *  Sub function of satWriteAndVerify10
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32  satWriteAndVerify10_1(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;

  pSense      = satIOContext->pSense;
  pSatDevData = satIOContext->pSatDevData;
  scsiCmnd    = &tiScsiRequest->scsiCmnd;
  fis         = satIOContext->pFis;

  TI_DBG5(("satWriteAndVerify10_1: start\n"));

  /* The verify pass needs the EXT opcode; 28-bit-only devices can not hold
     the 10-byte CDB's sector count / LBA in READ VERIFY SECTORS */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = scsiCmnd->cdb[5];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[4];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[3];       /* FIS LBA (23:16) */
    fis->d.device         = 0x40;                   /* FIS LBA mode set 01000000 */
    fis->d.lbaLowExp      = scsiCmnd->cdb[2];       /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = 0;                      /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = 0;                      /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                      /* FIS reserve */
    fis->d.sectorCount    = scsiCmnd->cdb[8];       /* FIS sector count (7:0) */
    fis->d.sectorCountExp = scsiCmnd->cdb[7];       /* FIS sector count (15:8) */
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satWriteAndVerify10CB;

    /*
     * Prepare SGL and send FIS to LL layer.
     */
    satIOContext->reqType = agRequestType;       /* Save it */

    status = sataLLIOStart( tiRoot,
                            tiIORequest,
                            tiDeviceHandle,
                            tiScsiRequest,
                            satIOContext);
    TI_DBG1(("satWriteAndVerify10_1: return status %d\n", status));
    return (status);
  }
  else
  {
    /* can't fit in SAT_READ_VERIFY_SECTORS because of Sector Count and LBA */
    TI_DBG1(("satWriteAndVerify10_1: can't fit in SAT_READ_VERIFY_SECTORS\n"));
    return tiError;
  }

  /* not reached: both branches above return */
  return tiSuccess;
}
#endif /* REMOVED */

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satWriteAndVerify12.
 *
 *  SAT implementation for SCSI satWriteAndVerify12.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
*/ /*****************************************************************************/ GLOBAL bit32 satWriteAndVerify12( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* combination of write12 and verify12 temp: since write12 is not support (due to internal checking), no support */ bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; bit32 LoopNum = 1; bit8 LBA[4]; bit8 TL[4]; bit32 rangeChk = agFALSE; /* lba and tl range check */ pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satWriteAndVerify12: start\n")); /* checking BYTCHK bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE_N_VERIFY_BYTCHK_MASK) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteAndVerify12: BYTCHK bit checking \n")); return tiSuccess; } /* checking CONTROL */ /* NACA == 1 or LINK == 1*/ if ( (scsiCmnd->cdb[11] & SCSI_NACA_MASK) || (scsiCmnd->cdb[11] & SCSI_LINK_MASK) ) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG2(("satWriteAndVerify12: return control\n")); return tiSuccess; } osti_memset(LBA, 0, sizeof(LBA)); osti_memset(TL, 0, sizeof(TL)); /* do not use memcpy due to indexing in LBA and TL */ LBA[0] = scsiCmnd->cdb[2]; /* MSB */ LBA[1] = scsiCmnd->cdb[3]; LBA[2] = scsiCmnd->cdb[4]; LBA[3] = scsiCmnd->cdb[5]; /* 
LSB */ TL[0] = scsiCmnd->cdb[6]; /* MSB */ TL[1] = scsiCmnd->cdb[7]; TL[2] = scsiCmnd->cdb[7]; TL[3] = scsiCmnd->cdb[8]; /* LSB */ rangeChk = satAddNComparebit32(LBA, TL); lba = satComputeCDB12LBA(satIOContext); tl = satComputeCDB12TL(satIOContext); /* Table 34, 9.1, p 46 */ /* note: As of 2/10/2006, no support for DMA QUEUED */ /* Table 34, 9.1, p 46, b When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1), return check condition */ if (pSatDevData->satNCQ != agTRUE && pSatDevData->sat48BitSupport != agTRUE ) { if (lba > SAT_TR_LBA_LIMIT - 1) { satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); TI_DBG1(("satWriteAndVerify12: return LBA out of range, not EXT\n")); return tiSuccess; } if (rangeChk) // if (lba + tl > SAT_TR_LBA_LIMIT) { TI_DBG1(("satWriteAndVerify12: return LBA+TL out of range, not EXT\n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } } /* case 1 and 2 */ if (!rangeChk) // if (lba + tl <= SAT_TR_LBA_LIMIT) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* WRITE DMA*/ /* In case that we can't fit the transfer length, we loop */ TI_DBG5(("satWriteAndVerify12: case 2\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_DMA; /* 0xCA */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* FIS LBA mode 
set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA; } else { /* case 1 */ /* WRITE MULTIPLE or WRITE SECTOR(S) */ /* WRITE SECTORS for easier implemetation */ /* In case that we can't fit the transfer length, we loop */ TI_DBG5(("satWriteAndVerify12: case 1\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_SECTORS; /* 0x30 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS; } } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* WRITE DMA EXT or WRITE DMA FUA EXT */ TI_DBG5(("satWriteAndVerify12: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x35 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 
:0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[8]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA_EXT; } else { /* case 4 */ /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */ /* WRITE SECTORS EXT for easier implemetation */ TI_DBG5(("satWriteAndVerify12: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[8]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* WRITE FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satWriteAndVerify12: case 5 !!! 
error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_ILLEGAL_REQUEST, 0, SCSI_SNSCODE_INVALID_FIELD_IN_CDB, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG6(("satWriteAndVerify12: case 5\n")); /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ fis->h.features = scsiCmnd->cdb[9]; /* FIS sector count (7:0) */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE12_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = scsiCmnd->cdb[8]; /* FIS sector count (15:8) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED; } satIOContext->currentLBA = lba; // satIOContext->OrgLBA = lba; satIOContext->OrgTL = tl; /* computing number of loop and remainder for tl 0xFF in case not ext 0xFFFF in case EXT */ if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA) { LoopNum = satComputeLoopNum(tl, 0xFF); } else if (fis->h.command == SAT_WRITE_SECTORS_EXT || fis->h.command == SAT_WRITE_DMA_EXT || fis->h.command == SAT_WRITE_DMA_FUA_EXT ) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } else { /* 
SAT_WRITE_FPDMA_QUEUEDK */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } satIOContext->LoopNum = LoopNum; satIOContext->LoopNum2 = LoopNum; if (LoopNum == 1) { TI_DBG5(("satWriteAndVerify12: NON CHAINED data\n")); /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonChainedWriteNVerifyCB; } else { TI_DBG1(("satWriteAndVerify12: CHAINED data\n")); /* re-setting tl */ if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA) { fis->d.sectorCount = 0xFF; } else if (fis->h.command == SAT_WRITE_SECTORS_EXT || fis->h.command == SAT_WRITE_DMA_EXT || fis->h.command == SAT_WRITE_DMA_FUA_EXT ) { fis->d.sectorCount = 0xFF; fis->d.sectorCountExp = 0xFF; } else { /* SAT_WRITE_FPDMA_QUEUED */ fis->h.features = 0xFF; fis->d.featuresExp = 0xFF; } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satChainedWriteNVerifyCB; } /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } GLOBAL bit32 satNonChainedWriteNVerify_Verify( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satDeviceData_t *pSatDevData; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satNonChainedWriteNVerify_Verify: start\n")); if (pSatDevData->sat48BitSupport == agTRUE) { fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA 
(23:16) */ fis->d.device = 0x40; /* FIS LBA mode set 01000000 */ fis->d.lbaLowExp = scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[7]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonChainedWriteNVerifyCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG1(("satNonChainedWriteNVerify_Verify: return status %d\n", status)); return (status); } else { /* can't fit in SAT_READ_VERIFY_SECTORS becasue of Sector Count and LBA */ TI_DBG1(("satNonChainedWriteNVerify_Verify: can't fit in SAT_READ_VERIFY_SECTORS\n")); return tiError; } } GLOBAL bit32 satChainedWriteNVerify_Write( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* Assumption: error check on lba and tl has been done in satWrite*() lba = lba + tl; */ bit32 status; satIOContext_t *satOrgIOContext = agNULL; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; bit32 lba = 0; bit32 DenomTL = 0xFF; bit32 Remainder = 0; bit8 LBA[4]; /* 0 MSB, 3 LSB */ TI_DBG1(("satChainedWriteNVerify_Write: start\n")); fis = satIOContext->pFis; satOrgIOContext = satIOContext->satOrgIOContext; scsiCmnd = satOrgIOContext->pScsiCmnd; osti_memset(LBA,0, sizeof(LBA)); switch (satOrgIOContext->ATACmd) { case SAT_WRITE_DMA: DenomTL = 0xFF; break; case SAT_WRITE_SECTORS: DenomTL = 0xFF; break; case SAT_WRITE_DMA_EXT: DenomTL 
= 0xFFFF; break; case SAT_WRITE_DMA_FUA_EXT: DenomTL = 0xFFFF; break; case SAT_WRITE_SECTORS_EXT: DenomTL = 0xFFFF; break; case SAT_WRITE_FPDMA_QUEUED: DenomTL = 0xFFFF; break; default: TI_DBG1(("satChainedWriteNVerify_Write: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } Remainder = satOrgIOContext->OrgTL % DenomTL; satOrgIOContext->currentLBA = satOrgIOContext->currentLBA + DenomTL; lba = satOrgIOContext->currentLBA; LBA[0] = (bit8)((lba & 0xF000) >> (8 * 3)); /* MSB */ LBA[1] = (bit8)((lba & 0xF00) >> (8 * 2)); LBA[2] = (bit8)((lba & 0xF0) >> 8); LBA[3] = (bit8)(lba & 0xF); /* LSB */ switch (satOrgIOContext->ATACmd) { case SAT_WRITE_DMA: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_DMA; /* 0xCA */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; break; case SAT_WRITE_SECTORS: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_SECTORS; /* 0x30 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); fis->d.lbaLowExp = 0; 
fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; break; case SAT_WRITE_DMA_EXT: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x3D */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; break; case SAT_WRITE_SECTORS_EXT: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA 
(47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; break; case SAT_WRITE_FPDMA_QUEUED: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* Check FUA bit */ if (scsiCmnd->cdb[1] & SCSI_WRITE10_FUA_MASK) fis->d.device = 0xC0; /* FIS FUA set */ else fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = LBA[0];; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->h.features = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.featuresExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->h.features = 0xFF; /* FIS sector count (7:0) */ fis->d.featuresExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; break; default: TI_DBG1(("satChainedWriteNVerify_Write: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } /* Initialize CB for SATA completion. */ /* chained data */ satIOContext->satCompleteCB = &satChainedWriteNVerifyCB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satChainedWriteNVerify_Write: return\n")); return (status); } /* similar to write12 and verify10; this will be similar to verify12 */ GLOBAL bit32 satChainedWriteNVerify_Start_Verify( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { /* deal with transfer length; others have been handled previously at this point; no LBA check; no range check; */ bit32 status; bit32 agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; satDeviceData_t *pSatDevData; tiIniScsiCmnd_t *scsiCmnd; agsaFisRegHostToDevice_t *fis; bit32 lba = 0; bit32 tl = 0; bit32 LoopNum = 1; bit8 LBA[4]; bit8 TL[4]; pSatDevData = satIOContext->pSatDevData; scsiCmnd = &tiScsiRequest->scsiCmnd; fis = satIOContext->pFis; TI_DBG5(("satChainedWriteNVerify_Start_Verify: start\n")); osti_memset(LBA, 0, sizeof(LBA)); osti_memset(TL, 0, sizeof(TL)); /* do not use memcpy due to indexing in LBA and TL */ LBA[0] = scsiCmnd->cdb[2]; /* MSB */ LBA[1] = scsiCmnd->cdb[3]; LBA[2] = scsiCmnd->cdb[4]; LBA[3] = scsiCmnd->cdb[5]; /* LSB */ TL[0] = scsiCmnd->cdb[6]; /* MSB */ TL[1] = scsiCmnd->cdb[7]; TL[2] = scsiCmnd->cdb[7]; TL[3] = scsiCmnd->cdb[8]; /* LSB */ lba = satComputeCDB12LBA(satIOContext); tl = satComputeCDB12TL(satIOContext); if (pSatDevData->sat48BitSupport == agTRUE) { TI_DBG5(("satChainedWriteNVerify_Start_Verify: SAT_READ_VERIFY_SECTORS_EXT\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set 01000000 */ fis->d.lbaLowExp 
= scsiCmnd->cdb[2]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = scsiCmnd->cdb[7]; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS_EXT; } else { TI_DBG5(("satChainedWriteNVerify_Start_Verify: SAT_READ_VERIFY_SECTORS\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS; /* 0x40 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = scsiCmnd->cdb[5]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = scsiCmnd->cdb[4]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = scsiCmnd->cdb[3]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (scsiCmnd->cdb[2] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = scsiCmnd->cdb[8]; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; satIOContext->ATACmd = SAT_READ_VERIFY_SECTORS; } satIOContext->currentLBA = lba; satIOContext->OrgTL = tl; /* computing number of loop and remainder for tl 0xFF in case not ext 0xFFFF in case EXT */ if (fis->h.command == SAT_READ_VERIFY_SECTORS) { LoopNum = satComputeLoopNum(tl, 0xFF); } else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT) { /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */ LoopNum = satComputeLoopNum(tl, 0xFFFF); } else { TI_DBG1(("satChainedWriteNVerify_Start_Verify: error case 1!!!\n")); LoopNum = 1; } satIOContext->LoopNum = LoopNum; if (LoopNum == 1) { TI_DBG5(("satChainedWriteNVerify_Start_Verify: NON CHAINED 
data\n")); /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satNonChainedWriteNVerifyCB; } else { TI_DBG1(("satChainedWriteNVerify_Start_Verify: CHAINED data\n")); /* re-setting tl */ if (fis->h.command == SAT_READ_VERIFY_SECTORS) { fis->d.sectorCount = 0xFF; } else if (fis->h.command == SAT_READ_VERIFY_SECTORS_EXT) { fis->d.sectorCount = 0xFF; fis->d.sectorCountExp = 0xFF; } else { TI_DBG1(("satChainedWriteNVerify_Start_Verify: error case 2!!!\n")); } /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satChainedWriteNVerifyCB; } /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); return (status); } GLOBAL bit32 satChainedWriteNVerify_Verify( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; satIOContext_t *satOrgIOContext = agNULL; agsaFisRegHostToDevice_t *fis; bit32 agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; bit32 lba = 0; bit32 DenomTL = 0xFF; bit32 Remainder = 0; bit8 LBA[4]; /* 0 MSB, 3 LSB */ TI_DBG2(("satChainedWriteNVerify_Verify: start\n")); fis = satIOContext->pFis; satOrgIOContext = satIOContext->satOrgIOContext; osti_memset(LBA,0, sizeof(LBA)); switch (satOrgIOContext->ATACmd) { case SAT_READ_VERIFY_SECTORS: DenomTL = 0xFF; break; case SAT_READ_VERIFY_SECTORS_EXT: DenomTL = 0xFFFF; break; default: TI_DBG1(("satChainedWriteNVerify_Verify: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } Remainder = satOrgIOContext->OrgTL % DenomTL; satOrgIOContext->currentLBA = satOrgIOContext->currentLBA + DenomTL; lba = satOrgIOContext->currentLBA; LBA[0] = (bit8)((lba & 0xF000) >> (8 * 3)); /* MSB */ LBA[1] = (bit8)((lba & 0xF00) >> (8 * 2)); LBA[2] = (bit8)((lba & 0xF0) >> 8); LBA[3] = (bit8)(lba & 0xF); /* LSB */ switch 
(satOrgIOContext->ATACmd) { case SAT_READ_VERIFY_SECTORS: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS; /* 0x40 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[0] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)Remainder; /* FIS sector count (7:0) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ } fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; break; case SAT_READ_VERIFY_SECTORS_EXT: fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_VERIFY_SECTORS_EXT; /* 0x42 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[3]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[2]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[1]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[0]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = 0; /* FIS LBA (39:32) */ fis->d.lbaHighExp = 0; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ if (satOrgIOContext->LoopNum == 1) { /* last loop */ fis->d.sectorCount = (bit8)(Remainder & 0xFF); /* FIS sector count (7:0) */ fis->d.sectorCountExp = (bit8)((Remainder & 0xFF00) >> 8); /* FIS sector count (15:8) */ } else { fis->d.sectorCount = 0xFF; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0xFF; /* FIS sector count (15:8) */ } fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_NON_DATA; break; default: 
TI_DBG1(("satChainedWriteNVerify_Verify: error incorrect ata command 0x%x\n", satIOContext->ATACmd)); return tiError; break; } /* Initialize CB for SATA completion. */ /* chained data */ satIOContext->satCompleteCB = &satChainedWriteNVerifyCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG5(("satChainedWriteNVerify_Verify: return\n")); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satWriteAndVerify16. * * SAT implementation for SCSI satWriteAndVerify16. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. 
*/
/*****************************************************************************/
GLOBAL bit32  satWriteAndVerify16(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
    combination of write16 and verify16
    since write16 has 8 bytes LBA -> problem ATA LBA(upto 6 bytes), no support

    Translates SCSI WRITE AND VERIFY (16) into an ATA write; the verify half
    is issued later by the completion callback (satNonChainedWriteNVerifyCB /
    satChainedWriteNVerifyCB).
  */
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     lba = 0;
  bit32                     tl = 0;
  bit32                     LoopNum = 1;
  bit8                      LBA[8];
  bit8                      TL[8];
  bit32                     rangeChk = agFALSE; /* lba and tl range check */
  bit32                     limitChk = agFALSE; /* lba and tl range check */

  pSense        = satIOContext->pSense;
  pSatDevData   = satIOContext->pSatDevData;
  scsiCmnd      = &tiScsiRequest->scsiCmnd;
  fis           = satIOContext->pFis;

  TI_DBG5(("satWriteAndVerify16:start\n"));

  /* checking BYTCHK bit; BYTCHK == 1 (host-supplied verify data) is not
     supported, so fail with ILLEGAL REQUEST / INVALID FIELD IN CDB */
  if (scsiCmnd->cdb[1] & SCSI_WRITE_N_VERIFY_BYTCHK_MASK)
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWriteAndVerify16: BYTCHK bit checking \n"));
    return tiSuccess;
  }

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[15] & SCSI_NACA_MASK) || (scsiCmnd->cdb[15] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG2(("satWriteAndVerify16: return control\n"));
    return tiSuccess;
  }

  osti_memset(LBA, 0, sizeof(LBA));
  osti_memset(TL, 0, sizeof(TL));

  /* do not use memcpy due to indexing in LBA and TL */
  LBA[0] = scsiCmnd->cdb[2];  /* MSB */
  LBA[1] = scsiCmnd->cdb[3];
  LBA[2] = scsiCmnd->cdb[4];
  LBA[3] = scsiCmnd->cdb[5];
  LBA[4] = scsiCmnd->cdb[6];
  LBA[5] = scsiCmnd->cdb[7];
  LBA[6] = scsiCmnd->cdb[8];
  LBA[7] = scsiCmnd->cdb[9];  /* LSB */

  TL[0] = 0;
  TL[1] = 0;
  TL[2] = 0;
  TL[3] = 0;
  TL[4] = scsiCmnd->cdb[10];  /* MSB */
  TL[5] = scsiCmnd->cdb[11];
  TL[6] = scsiCmnd->cdb[12];
  TL[7] = scsiCmnd->cdb[13];  /* LSB */

  /* rangeChk: agTRUE when LBA + TL wraps/overflows 64 bits;
     limitChk: agTRUE when LBA exceeds the 28-bit addressing limit */
  rangeChk = satAddNComparebit64(LBA, TL);
  limitChk = satCompareLBALimitbit(LBA);

  lba = satComputeCDB16LBA(satIOContext);
  tl = satComputeCDB16TL(satIOContext);

  /* Table 34, 9.1, p 46 */
  /*
    note: As of 2/10/2006, no support for DMA QUEUED
  */
  /*
    Table 34, 9.1, p 46, b
    When no 48-bit addressing support or NCQ, if LBA is beyond (2^28 - 1),
    return check condition
  */
  if (pSatDevData->satNCQ != agTRUE &&
      pSatDevData->sat48BitSupport != agTRUE
      )
  {
    if (limitChk)
    {
      TI_DBG1(("satWriteAndVerify16: return LBA out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }
    if (rangeChk) //  if (lba + tl > SAT_TR_LBA_LIMIT)
    {
      TI_DBG1(("satWriteAndVerify16: return LBA+TL out of range, not EXT\n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }
  }

  /* case 1 and 2 : 28-bit commands (no 48-bit addressing required) */
  if (!rangeChk) //  if (lba + tl <= SAT_TR_LBA_LIMIT)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 2 */
      /* WRITE DMA*/
      /* In case that we can't fit the transfer length, we loop */
      TI_DBG5(("satWriteAndVerify16: case 2\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_DMA;          /* 0xCA */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[9];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[8];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[7];       /* FIS LBA (23:16) */

      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF));

      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[13];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
      satIOContext->ATACmd = SAT_WRITE_DMA;
    }
    else
    {
      /* case 1 */
      /* WRITE MULTIPLE or WRITE SECTOR(S) */
      /* WRITE SECTORS for easier implemetation */
      /* In case that we can't fit the transfer length, we loop */
      TI_DBG5(("satWriteAndVerify16: case 1\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C bit is set */
      fis->h.command        = SAT_WRITE_SECTORS;      /* 0x30 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[9];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[8];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[7];       /* FIS LBA (23:16) */

      /* FIS LBA mode set LBA (27:24) */
      fis->d.device         = (bit8)((0x4 << 4) | (scsiCmnd->cdb[6] & 0xF));

      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = scsiCmnd->cdb[13];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS;
    }
  }

  /* case 3 and 4 : 48-bit (EXT) commands; may overwrite the FIS set above */
  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE)
    {
      /* case 3 */
      /* WRITE DMA EXT or WRITE DMA FUA EXT */
      TI_DBG5(("satWriteAndVerify16: case 3\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */

      /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */
      fis->h.command        = SAT_WRITE_DMA_EXT;      /* 0x35 */

      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[9];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[8];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[7];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[6];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = scsiCmnd->cdb[5];       /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = scsiCmnd->cdb[4];       /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[13];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[12];      /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE;
      satIOContext->ATACmd = SAT_WRITE_DMA_EXT;
    }
    else
    {
      /* case 4 */
      /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */
      /* WRITE SECTORS EXT for easier implemetation */
      TI_DBG5(("satWriteAndVerify16: case 4\n"));
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_WRITE_SECTORS_EXT;  /* 0x34 */

      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = scsiCmnd->cdb[9];       /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = scsiCmnd->cdb[8];       /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = scsiCmnd->cdb[7];       /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA mode set */
      fis->d.lbaLowExp      = scsiCmnd->cdb[6];       /* FIS LBA (31:24) */
      fis->d.lbaMidExp      = scsiCmnd->cdb[5];       /* FIS LBA (39:32) */
      fis->d.lbaHighExp     = scsiCmnd->cdb[4];       /* FIS LBA (47:40) */
      fis->d.featuresExp    = 0;                      /* FIS reserve */
      fis->d.sectorCount    = scsiCmnd->cdb[13];      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = scsiCmnd->cdb[12];      /* FIS sector count (15:8) */
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;
      satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT;
    }
  }

  /* case 5 : NCQ-capable device; takes precedence over the FIS set above */
  if (pSatDevData->satNCQ == agTRUE)
  {
    /* WRITE FPDMA QUEUED */
    if (pSatDevData->sat48BitSupport != agTRUE)
    {
      TI_DBG5(("satWriteAndVerify16: case 5 !!! error NCQ but 28 bit address support \n"));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }
    TI_DBG6(("satWriteAndVerify16: case 5\n"));

    /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */
    fis->h.features       = scsiCmnd->cdb[13];      /* FIS sector count (7:0) */
    fis->d.lbaLow         = scsiCmnd->cdb[9];       /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = scsiCmnd->cdb[8];       /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = scsiCmnd->cdb[7];       /* FIS LBA (23:16) */

    /* Check FUA bit */
    if (scsiCmnd->cdb[1] & SCSI_WRITE16_FUA_MASK)
      fis->d.device       = 0xC0;                   /* FIS FUA set */
    else
      fis->d.device       = 0x40;                   /* FIS FUA clear */

    fis->d.lbaLowExp      = scsiCmnd->cdb[6];       /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = scsiCmnd->cdb[5];       /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = scsiCmnd->cdb[4];       /* FIS LBA (47:40) */
    fis->d.featuresExp    = scsiCmnd->cdb[12];      /* FIS sector count (15:8) */
    fis->d.sectorCount    = 0;                      /* Tag (7:3) set by LL layer */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;

    agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE;
    satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED;
  }

  /* remember starting LBA and original transfer length for chained I/O */
  satIOContext->currentLBA = lba;
  satIOContext->OrgTL = tl;

  /*
    computing number of loop and remainder for tl
    0xFF in case not ext
    0xFFFF in case EXT
  */
  if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA)
  {
    LoopNum = satComputeLoopNum(tl, 0xFF);
  }
  else if (fis->h.command == SAT_WRITE_SECTORS_EXT ||
           fis->h.command == SAT_WRITE_DMA_EXT     ||
           fis->h.command == SAT_WRITE_DMA_FUA_EXT
           )
  {
    /* SAT_READ_SECTORS_EXT, SAT_READ_DMA_EXT */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }
  else
  {
    /* SAT_WRITE_FPDMA_QUEUED */
    LoopNum = satComputeLoopNum(tl, 0xFFFF);
  }

  satIOContext->LoopNum = LoopNum;

  if (LoopNum == 1)
  {
    TI_DBG5(("satWriteAndVerify16: NON CHAINED data\n"));
    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satNonChainedWriteNVerifyCB;
  }
  else
  {
    TI_DBG1(("satWriteAndVerify16: CHAINED data\n"));
    /* re-setting tl: first chained command carries the per-command maximum */
    if (fis->h.command == SAT_WRITE_SECTORS || fis->h.command == SAT_WRITE_DMA)
    {
      fis->d.sectorCount    = 0xFF;
    }
    else if (fis->h.command == SAT_WRITE_SECTORS_EXT ||
             fis->h.command == SAT_WRITE_DMA_EXT     ||
             fis->h.command == SAT_WRITE_DMA_FUA_EXT
             )
    {
      fis->d.sectorCount    = 0xFF;
      fis->d.sectorCountExp = 0xFF;
    }
    else
    {
      /* SAT_WRITE_FPDMA_QUEUED */
      fis->h.features       = 0xFF;
      fis->d.featuresExp    = 0xFF;
    }

    /* Initialize CB for SATA completion. */
    satIOContext->satCompleteCB = &satChainedWriteNVerifyCB;
  }

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  return (status);
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReadMediaSerialNumber.
 *
 *  SAT implementation for SCSI Read Media Serial Number.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32  satReadMediaSerialNumber(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  agsaSATAIdentifyData_t    *pSATAIdData;
  bit8                      *pSerialNumber;

  pSense        = satIOContext->pSense;
  pSatDevData   = satIOContext->pSatDevData;
  scsiCmnd      = &tiScsiRequest->scsiCmnd;
  fis           = satIOContext->pFis;
  pSATAIdData   = &(pSatDevData->satIdentifyData);
  pSerialNumber = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG1(("satReadMediaSerialNumber: start\n"));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[11] & SCSI_NACA_MASK) || (scsiCmnd->cdb[11] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satReadMediaSerialNumber: return control\n"));
    return tiSuccess;
  }

  /* 4-byte allocation length: host wants only the serial number length header */
  if (tiScsiRequest->scsiCmnd.expDataLength == 4)
  {
    if (pSATAIdData->commandSetFeatureDefault & 0x4)
    {
      TI_DBG1(("satReadMediaSerialNumber: Media serial number returning only length\n"));
      /* SPC-3 6.16 p192; filling in length */
      pSerialNumber[0] = 0;
      pSerialNumber[1] = 0;
      pSerialNumber[2] = 0;
      pSerialNumber[3] = 0x3C;
    }
    else
    {
      /* 1 sector - 4 = 512 - 4 to avoid underflow; 0x1fc*/
      pSerialNumber[0] = 0;
      pSerialNumber[1] = 0;
      pSerialNumber[2] = 0x1;
      pSerialNumber[3] = 0xfc;
    }

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_GOOD,
                              agNULL,
                              satIOContext->interruptContext);
    return tiSuccess;
  }

  if ( pSatDevData->IDDeviceValid == agTRUE)
  {
    if (pSATAIdData->commandSetFeatureDefault & 0x4)
    {
      /* word87 bit2 Media serial number is valid */
      /* read word 176 to 205; length is 2*30 = 60 = 0x3C*/
      tdhexdump("ID satReadMediaSerialNumber", (bit8*)pSATAIdData->currentMediaSerialNumber, 2*30);
      /* SPC-3 6.16 p192; filling in length */
      pSerialNumber[0] = 0;
      pSerialNumber[1] = 0;
      pSerialNumber[2] = 0;
      pSerialNumber[3] = 0x3C;
      osti_memcpy(&pSerialNumber[4], (void *)pSATAIdData->currentMediaSerialNumber, 60);
      tdhexdump("satReadMediaSerialNumber", (bit8*)pSerialNumber, 2*30 + 4);
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
      return tiSuccess;
    }
    else
    {
      /* word87 bit2 Media serial number is NOT valid */
      TI_DBG1(("satReadMediaSerialNumber: Media serial number is NOT valid \n"));

      if (pSatDevData->sat48BitSupport == agTRUE)
      {
        /* READ VERIFY SECTORS EXT */
        /* NOTE(review): despite the comment above, the command issued is
           SAT_READ_SECTORS_EXT (a data-in read of one sector), not a
           READ VERIFY -- confirm intent against the completion callback */
        fis->h.fisType        = 0x27;                 /* Reg host to device */
        fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
        fis->h.command        = SAT_READ_SECTORS_EXT; /* 0x24 */

        fis->h.features       = 0;                    /* FIS reserve */
        fis->d.lbaLow         = 0;                    /* FIS LBA (7 :0 ) */
        fis->d.lbaMid         = 0;                    /* FIS LBA (15:8 ) */
        fis->d.lbaHigh        = 0;                    /* FIS LBA (23:16) */
        fis->d.device         = 0x40;                 /* FIS LBA mode set */
        fis->d.lbaLowExp      = 0;                    /* FIS LBA (31:24) */
        fis->d.lbaMidExp      = 0;                    /* FIS LBA (39:32) */
        fis->d.lbaHighExp     = 0;                    /* FIS LBA (47:40) */
        fis->d.featuresExp    = 0;                    /* FIS reserve */
        fis->d.sectorCount    = 1;                    /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0;                    /* FIS sector count (15:8) */
        fis->d.reserved4      = 0;
        fis->d.control        = 0;                    /* FIS HOB bit clear */
        fis->d.reserved5      = 0;

        agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
      }
      else
      {
        /* READ VERIFY SECTORS */
        /* NOTE(review): as above, the command actually issued is
           SAT_READ_SECTORS (0x20), not READ VERIFY */
        fis->h.fisType        = 0x27;                 /* Reg host to device */
        fis->h.c_pmPort       = 0x80;                 /* C Bit is set */
        fis->h.command        = SAT_READ_SECTORS;     /* 0x20 */
        fis->h.features       = 0;                    /* FIS reserve */
        fis->d.lbaLow         = 0;                    /* FIS LBA (7 :0 ) */
        fis->d.lbaMid         = 0;                    /* FIS LBA (15:8 ) */
        fis->d.lbaHigh        = 0;                    /* FIS LBA (23:16) */
        fis->d.device         = 0x40;                 /* FIS LBA (27:24) and FIS LBA mode */
        fis->d.lbaLowExp      = 0;
        fis->d.lbaMidExp      = 0;
        fis->d.lbaHighExp     = 0;
        fis->d.featuresExp    = 0;
        fis->d.sectorCount    = 1;                    /* FIS sector count (7:0) */
        fis->d.sectorCountExp = 0;
        fis->d.reserved4      = 0;
        fis->d.control        = 0;                    /* FIS HOB bit clear */
        fis->d.reserved5      = 0;

        agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
      }
      satIOContext->satCompleteCB = &satReadMediaSerialNumberCB;
      satIOContext->reqType = agRequestType;       /* Save it */
      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);

      return status;
    }
  }
  else
  {
    /* temporary failure */
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOFailed,
                              tiDetailOtherError,
                              agNULL,
                              satIOContext->interruptContext);

    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReadBuffer.
 *
 *  SAT implementation for SCSI Read Buffer.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
/* SAT-2, Revision 00*/
GLOBAL bit32  satReadBuffer(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  bit32                     status = tiSuccess;
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit32                     bufferOffset;
  bit32                     tl;
  bit8                      mode;
  bit8                      bufferID;
  bit8                      *pBuff;

  pSense        = satIOContext->pSense;
  scsiCmnd      = &tiScsiRequest->scsiCmnd;
  fis           = satIOContext->pFis;
  pBuff         = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG2(("satReadBuffer: start\n"));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satReadBuffer: return control\n"));
    return tiSuccess;
  }

  /* CDB bytes 3..5: 24-bit buffer offset; bytes 6..8: 24-bit allocation length */
  bufferOffset = (scsiCmnd->cdb[3] << (8*2)) + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];
  tl = (scsiCmnd->cdb[6] << (8*2)) + (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  mode = (bit8)(scsiCmnd->cdb[1] & SCSI_READ_BUFFER_MODE_MASK);
  bufferID = scsiCmnd->cdb[2];

  if (mode == READ_BUFFER_DATA_MODE) /* 2 */
  {
    /* only buffer 0, offset 0, exactly one 512-byte sector is supported */
    if (bufferID == 0 && bufferOffset == 0 && tl == 512)
    {
      /* send ATA READ BUFFER */
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_READ_BUFFER;        /* 0xE4 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA (27:24) and FIS LBA mode */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;

      satIOContext->satCompleteCB = &satReadBufferCB;

      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);

      return status;
    }
    if (bufferID == 0 && bufferOffset == 0 && tl != 512)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satReadBuffer: allocation length is not 512; it is %d\n", tl));
      return tiSuccess;
    }
    if (bufferID == 0 && bufferOffset != 0)
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satReadBuffer: buffer offset is not 0; it is %d\n", bufferOffset));
      return tiSuccess;
    }
    /* all other cases unsupported */
    TI_DBG1(("satReadBuffer: unsupported case 1\n"));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    return tiSuccess;
  }
  else if (mode == READ_BUFFER_DESCRIPTOR_MODE) /* 3 */
  {
    if (tl < READ_BUFFER_DESCRIPTOR_MODE_DATA_LEN) /* 4 */
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satReadBuffer: tl < 4; tl is %d\n", tl));
      return tiSuccess;
    }
    if (bufferID == 0)
    {
      /* SPC-4, 6.15.5, p189; SAT-2 Rev00, 8.7.2.3, p41*/
      /* descriptor: offset boundary 2^0xFF? no -- byte0 offset boundary,
         bytes1..3 buffer capacity 0x000200 (512) */
      pBuff[0] = 0xFF;
      pBuff[1] = 0x00;
      pBuff[2] = 0x02;
      pBuff[3] = 0x00;

      if (READ_BUFFER_DESCRIPTOR_MODE_DATA_LEN < tl)
      {
        /* underrrun */
        TI_DBG1(("satReadBuffer: underrun tl %d data %d\n", tl, READ_BUFFER_DESCRIPTOR_MODE_DATA_LEN));

        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOUnderRun,
                                  tl - READ_BUFFER_DESCRIPTOR_MODE_DATA_LEN,
                                  agNULL,
                                  satIOContext->interruptContext );

        return tiSuccess;
      }
      else
      {
        ostiInitiatorIOCompleted( tiRoot,
                                  tiIORequest,
                                  tiIOSuccess,
                                  SCSI_STAT_GOOD,
                                  agNULL,
                                  satIOContext->interruptContext);
        return tiSuccess;
      }
    }
    else
    {
      /* We don't support other than bufferID 0 */
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_COMMAND,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      return tiSuccess;
    }
  }
  else
  {
    /* We don't support any other mode */
    TI_DBG1(("satReadBuffer: unsupported mode %d\n", mode));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satWriteBuffer.
 *
 *  SAT implementation for SCSI Write Buffer.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
/* SAT-2, Revision 00*/
GLOBAL bit32  satWriteBuffer(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
#ifdef NOT_YET
  bit32                     agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;
#endif
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  bit32                     bufferOffset;
  bit32                     parmLen;
  bit8                      mode;
  bit8                      bufferID;
  bit8                      *pBuff;

  pSense        = satIOContext->pSense;
  scsiCmnd      = &tiScsiRequest->scsiCmnd;
  pBuff         = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG2(("satWriteBuffer: start\n"));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[9] & SCSI_NACA_MASK) || (scsiCmnd->cdb[9] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satWriteBuffer: return control\n"));
    return tiSuccess;
  }

  /* CDB bytes 3..5: 24-bit buffer offset; bytes 6..8: 24-bit parameter length */
  bufferOffset = (scsiCmnd->cdb[3] << (8*2)) + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];
  parmLen = (scsiCmnd->cdb[6] << (8*2)) + (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8];

  /* NOTE(review): mode is extracted with the READ BUFFER mask; presumably the
     WRITE BUFFER mode field has the same bit layout -- verify the mask value */
  mode = (bit8)(scsiCmnd->cdb[1] & SCSI_READ_BUFFER_MODE_MASK);
  bufferID = scsiCmnd->cdb[2];

  /* for debugging only */
  tdhexdump("satWriteBuffer pBuff", (bit8 *)pBuff, 24);

  if (mode == WRITE_BUFFER_DATA_MODE) /* 2 */
  {
    if (bufferID == 0 && bufferOffset == 0 && parmLen == 512)
    {
      TI_DBG1(("satWriteBuffer: sending ATA WRITE BUFFER\n"));
      /* send ATA WRITE BUFFER */
      /* NOTE(review): disabled path; as written it references 'fis' and
         'status', which are not declared in this function -- would need
         declarations before NOT_YET can be enabled */
#ifdef NOT_YET
      fis->h.fisType        = 0x27;                   /* Reg host to device */
      fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
      fis->h.command        = SAT_WRITE_BUFFER;       /* 0xE8 */
      fis->h.features       = 0;                      /* FIS reserve */
      fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
      fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
      fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
      fis->d.device         = 0x40;                   /* FIS LBA (27:24) and FIS LBA mode */
      fis->d.lbaLowExp      = 0;
      fis->d.lbaMidExp      = 0;
      fis->d.lbaHighExp     = 0;
      fis->d.featuresExp    = 0;
      fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
      fis->d.sectorCountExp = 0;
      fis->d.reserved4      = 0;
      fis->d.control        = 0;                      /* FIS HOB bit clear */
      fis->d.reserved5      = 0;

      agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE;

      satIOContext->satCompleteCB = &satWriteBufferCB;

      satIOContext->reqType = agRequestType;       /* Save it */

      status = sataLLIOStart( tiRoot,
                              tiIORequest,
                              tiDeviceHandle,
                              tiScsiRequest,
                              satIOContext);

      return status;
#endif
      /* temp: complete successfully without issuing the ATA command */
      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_GOOD,
                                agNULL,
                                satIOContext->interruptContext);
      return tiSuccess;
    }
    if ( (bufferID == 0 && bufferOffset != 0) ||
         (bufferID == 0 && parmLen != 512)
         )
    {
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ILLEGAL_REQUEST,
                          0,
                          SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                          satIOContext);

      ostiInitiatorIOCompleted( tiRoot,
                                tiIORequest,
                                tiIOSuccess,
                                SCSI_STAT_CHECK_CONDITION,
                                satIOContext->pTiSenseData,
                                satIOContext->interruptContext );

      TI_DBG1(("satWriteBuffer: wrong buffer offset %d or parameter length parmLen %d\n", bufferOffset, parmLen));
      return tiSuccess;
    }
    /* all other cases unsupported */
    TI_DBG1(("satWriteBuffer: unsupported case 1\n"));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    return tiSuccess;
  }
  else if (mode == WRITE_BUFFER_DL_MICROCODE_SAVE_MODE) /* 5 */
  {
    /* microcode download not implemented yet */
    TI_DBG1(("satWriteBuffer: not yet supported mode %d\n", mode));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    return tiSuccess;
  }
  else
  {
    /* We don't support any other mode */
    TI_DBG1(("satWriteBuffer: unsupported mode %d\n", mode));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_COMMAND,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    return tiSuccess;
  }
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReassignBlocks.
 *
 *  SAT implementation for SCSI Reassign Blocks.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32  satReassignBlocks(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext)
{
  /*
    assumes all LBA fits in ATA command; no boundary condition is checked here yet

    Parses the first defect-descriptor LBA from the parameter list and issues
    an ATA READ VERIFY against it; the remaining descriptors are walked by
    satReassignBlocksCB / satReassignBlocks_1 using the LBA/ParmIndex/ParmLen
    state saved in the IO context.
  */
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit8                      *pParmList;    /* Log Page data buffer */
  bit8                      LongLBA;
  bit8                      LongList;
  bit32                     defectListLen;
  bit8                      LBA[8];
  bit32                     startingIndex;

  pSense        = satIOContext->pSense;
  pSatDevData   = satIOContext->pSatDevData;
  scsiCmnd      = &tiScsiRequest->scsiCmnd;
  fis           = satIOContext->pFis;
  pParmList     = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG5(("satReassignBlocks: start\n"));

  /* checking CONTROL */
  /* NACA == 1 or LINK == 1*/
  if ( (scsiCmnd->cdb[5] & SCSI_NACA_MASK) || (scsiCmnd->cdb[5] & SCSI_LINK_MASK) )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_ILLEGAL_REQUEST,
                        0,
                        SCSI_SNSCODE_INVALID_FIELD_IN_CDB,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              satIOContext->interruptContext );

    TI_DBG1(("satReassignBlocks: return control\n"));
    return tiSuccess;
  }

  /* reset per-request reassign-walk state kept in the IO context */
  osti_memset(satIOContext->LBA, 0, 8);
  satIOContext->ParmIndex = 0;
  satIOContext->ParmLen = 0;

  LongList = (bit8)(scsiCmnd->cdb[1] & SCSI_REASSIGN_BLOCKS_LONGLIST_MASK);
  LongLBA = (bit8)(scsiCmnd->cdb[1] & SCSI_REASSIGN_BLOCKS_LONGLBA_MASK);
  osti_memset(LBA, 0, sizeof(LBA));

  /* defect list length: 2-byte field (short list) or 4-byte field (long list) */
  if (LongList == 0)
  {
    defectListLen = (pParmList[2] << 8) + pParmList[3];
  }
  else
  {
    defectListLen = (pParmList[0] << (8*3)) + (pParmList[1] << (8*2))
                  + (pParmList[2] << 8) + pParmList[3];
  }
  /* SBC 5.16.2, p61*/
  satIOContext->ParmLen = defectListLen + 4 /* header size */;

  startingIndex = 4;

  /* first defect descriptor: 4-byte LBA (short) or 8-byte LBA (long) */
  if (LongLBA == 0)
  {
    LBA[4] = pParmList[startingIndex];   /* MSB */
    LBA[5] = pParmList[startingIndex+1];
    LBA[6] = pParmList[startingIndex+2];
    LBA[7] = pParmList[startingIndex+3]; /* LSB */
    startingIndex = startingIndex + 4;
  }
  else
  {
    LBA[0] = pParmList[startingIndex];   /* MSB */
    LBA[1] = pParmList[startingIndex+1];
    LBA[2] = pParmList[startingIndex+2];
    LBA[3] = pParmList[startingIndex+3];
    LBA[4] = pParmList[startingIndex+4];
    LBA[5] = pParmList[startingIndex+5];
    LBA[6] = pParmList[startingIndex+6];
    LBA[7] = pParmList[startingIndex+7]; /* LSB */
    startingIndex = startingIndex + 8;
  }

  tdhexdump("satReassignBlocks Parameter list", (bit8 *)pParmList, 4 + defectListLen);

  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    /* sends READ VERIFY SECTOR(S) EXT*/
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = LBA[7];                 /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = LBA[6];                 /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = LBA[5];                 /* FIS LBA (23:16) */
    fis->d.lbaLowExp      = LBA[4];                 /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = LBA[3];                 /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = LBA[2];                 /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                      /* FIS reserve */
    fis->d.sectorCount    = 1;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;                      /* FIS sector count (15:8) */
    fis->d.reserved4      = 0;
    fis->d.device         = 0x40;                   /* 01000000 */
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;
  }
  else
  {
    /* READ VERIFY SECTOR(S)*/
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS;/* 0x40 */
    fis->h.features       = 0;                      /* FIS features NA */
    fis->d.lbaLow         = LBA[7];                 /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = LBA[6];                 /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = LBA[5];                 /* FIS LBA (23:16) */
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = 1;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.device         = (bit8)((0x4 << 4) | (LBA[4] & 0xF)); /* DEV and LBA 27:24 */
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;
  }

  /* save walk state so the completion callback can continue the list */
  osti_memcpy(satIOContext->LBA, LBA, 8);
  satIOContext->ParmIndex = startingIndex;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satReassignBlocksCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReassignBlocks_1.
 *
 *  SAT implementation for SCSI Reassign Blocks. This is helper function for
 *  satReassignBlocks and satReassignBlocksCB. This sends ATA verify command.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
/* next LBA; sends READ VERIFY SECTOR; update LBA and ParmIdx */
GLOBAL bit32  satReassignBlocks_1(
                   tiRoot_t                  *tiRoot,
                   tiIORequest_t             *tiIORequest,
                   tiDeviceHandle_t          *tiDeviceHandle,
                   tiScsiInitiatorRequest_t  *tiScsiRequest,
                   satIOContext_t            *satIOContext,
                   satIOContext_t            *satOrgIOContext
                   )
{
  /*
    assumes all LBA fits in ATA command; no boundary condition is checked here yet
    tiScsiRequest is OS generated; needs for accessing parameter list

    Picks up the defect-list walk where satOrgIOContext->ParmIndex left off,
    issues an ATA READ VERIFY for the next descriptor's LBA, and advances
    the saved LBA/ParmIndex state in the original IO context.
  */
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  tiIniScsiCmnd_t           *scsiCmnd;
  agsaFisRegHostToDevice_t  *fis;
  bit8                      *pParmList;    /* Log Page data buffer */
  bit8                      LongLBA;
  bit8                      LBA[8];
  bit32                     startingIndex;

  pSatDevData   = satIOContext->pSatDevData;
  scsiCmnd      = &tiScsiRequest->scsiCmnd;
  fis           = satIOContext->pFis;
  pParmList     = (bit8 *) tiScsiRequest->sglVirtualAddr;

  TI_DBG5(("satReassignBlocks_1: start\n"));

  LongLBA = (bit8)(scsiCmnd->cdb[1] & SCSI_REASSIGN_BLOCKS_LONGLBA_MASK);
  osti_memset(LBA, 0, sizeof(LBA));

  /* resume at the descriptor index saved by the previous step */
  startingIndex = satOrgIOContext->ParmIndex;

  /* next defect descriptor: 4-byte LBA (short) or 8-byte LBA (long) */
  if (LongLBA == 0)
  {
    LBA[4] = pParmList[startingIndex];
    LBA[5] = pParmList[startingIndex+1];
    LBA[6] = pParmList[startingIndex+2];
    LBA[7] = pParmList[startingIndex+3];
    startingIndex = startingIndex + 4;
  }
  else
  {
    LBA[0] = pParmList[startingIndex];
    LBA[1] = pParmList[startingIndex+1];
    LBA[2] = pParmList[startingIndex+2];
    LBA[3] = pParmList[startingIndex+3];
    LBA[4] = pParmList[startingIndex+4];
    LBA[5] = pParmList[startingIndex+5];
    LBA[6] = pParmList[startingIndex+6];
    LBA[7] = pParmList[startingIndex+7];
    startingIndex = startingIndex + 8;
  }

  if (pSatDevData->sat48BitSupport == agTRUE)
  {
    /* sends READ VERIFY SECTOR(S) EXT*/
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS_EXT;/* 0x42 */
    fis->h.features       = 0;                      /* FIS reserve */
    fis->d.lbaLow         = LBA[7];                 /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = LBA[6];                 /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = LBA[5];                 /* FIS LBA (23:16) */
    fis->d.lbaLowExp      = LBA[4];                 /* FIS LBA (31:24) */
    fis->d.lbaMidExp      = LBA[3];                 /* FIS LBA (39:32) */
    fis->d.lbaHighExp     = LBA[2];                 /* FIS LBA (47:40) */
    fis->d.featuresExp    = 0;                      /* FIS reserve */
    fis->d.sectorCount    = 1;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;                      /* FIS sector count (15:8) */
    fis->d.reserved4      = 0;
    fis->d.device         = 0x40;                   /* 01000000 */
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;
  }
  else
  {
    /* READ VERIFY SECTOR(S)*/
    fis->h.fisType        = 0x27;                   /* Reg host to device */
    fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
    fis->h.command        = SAT_READ_VERIFY_SECTORS;/* 0x40 */
    fis->h.features       = 0;                      /* FIS features NA */
    fis->d.lbaLow         = LBA[7];                 /* FIS LBA (7 :0 ) */
    fis->d.lbaMid         = LBA[6];                 /* FIS LBA (15:8 ) */
    fis->d.lbaHigh        = LBA[5];                 /* FIS LBA (23:16) */
    fis->d.lbaLowExp      = 0;
    fis->d.lbaMidExp      = 0;
    fis->d.lbaHighExp     = 0;
    fis->d.featuresExp    = 0;
    fis->d.sectorCount    = 1;                      /* FIS sector count (7:0) */
    fis->d.sectorCountExp = 0;
    fis->d.reserved4      = 0;
    fis->d.device         = (bit8)((0x4 << 4) | (LBA[4] & 0xF)); /* DEV and LBA 27:24 */
    fis->d.control        = 0;                      /* FIS HOB bit clear */
    fis->d.reserved5      = 0;
  }

  /* advance the walk state on the ORIGINAL context (shared across steps) */
  osti_memcpy(satOrgIOContext->LBA, LBA, 8);
  satOrgIOContext->ParmIndex = startingIndex;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satReassignBlocksCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  /* NOTE(review): the return value of sataLLIOStart is discarded and
     tiSuccess is returned unconditionally -- confirm whether a start
     failure is recovered elsewhere (e.g. by the callback path) */
  sataLLIOStart( tiRoot,
                 tiIORequest,
                 tiDeviceHandle,
                 tiScsiRequest,
                 satIOContext );

  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI satReassignBlocks_2.
 *
 *  SAT implementation for SCSI Reassign Blocks. This is helper function for
 *  satReassignBlocks and satReassignBlocksCB. This sends ATA write command.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
* \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * \param LBA: Pointer to the LBA to be processed * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ /* current LBA; sends WRITE */ GLOBAL bit32 satReassignBlocks_2( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext, bit8 *LBA ) { /* assumes all LBA fits in ATA command; no boundary condition is checked here yet tiScsiRequest is TD generated for writing */ bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; scsiRspSense_t *pSense; agsaFisRegHostToDevice_t *fis; pSense = satIOContext->pSense; pSatDevData = satIOContext->pSatDevData; fis = satIOContext->pFis; if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 2 */ /* WRITE DMA*/ /* can't fit the transfer length */ TI_DBG5(("satReassignBlocks_2: case 2\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_DMA; /* 0xCA */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[5]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[4] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; 
fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; satIOContext->ATACmd = SAT_WRITE_DMA; } else { /* case 1 */ /* WRITE MULTIPLE or WRITE SECTOR(S) */ /* WRITE SECTORS for easier implemetation */ /* can't fit the transfer length */ TI_DBG5(("satReassignBlocks_2: case 1\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C bit is set */ fis->h.command = SAT_WRITE_SECTORS; /* 0x30 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[7]; /* FIS LBA (23:16) */ /* FIS LBA mode set LBA (27:24) */ fis->d.device = (bit8)((0x4 << 4) | (LBA[4] & 0xF)); fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS; } /* case 3 and 4 */ if (pSatDevData->sat48BitSupport == agTRUE) { if (pSatDevData->satDMASupport == agTRUE && pSatDevData->satDMAEnabled == agTRUE) { /* case 3 */ /* WRITE DMA EXT or WRITE DMA FUA EXT */ TI_DBG5(("satReassignBlocks_2: case 3\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ /* SAT_WRITE_DMA_FUA_EXT is optional and we don't support it */ fis->h.command = SAT_WRITE_DMA_EXT; /* 0x35 */ satIOContext->ATACmd = SAT_WRITE_DMA_EXT; fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[5]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[4]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = LBA[3]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = LBA[2]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ 
fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_DMA_WRITE; } else { /* case 4 */ /* WRITE MULTIPLE EXT or WRITE MULTIPLE FUA EXT or WRITE SECTOR(S) EXT */ /* WRITE SECTORS EXT for easier implemetation */ TI_DBG5(("satReassignBlocks_2: case 4\n")); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_SECTORS_EXT; /* 0x34 */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = LBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[5]; /* FIS LBA (23:16) */ fis->d.device = 0x40; /* FIS LBA mode set */ fis->d.lbaLowExp = LBA[4]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = LBA[3]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = LBA[2]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 1; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; /* FIS sector count (15:8) */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_WRITE; satIOContext->ATACmd = SAT_WRITE_SECTORS_EXT; } } /* case 5 */ if (pSatDevData->satNCQ == agTRUE) { /* WRITE FPDMA QUEUED */ if (pSatDevData->sat48BitSupport != agTRUE) { TI_DBG5(("satReassignBlocks_2: case 5 !!! 
error NCQ but 28 bit address support \n")); satSetSensePayload( pSense, SCSI_SNSKEY_HARDWARE_ERROR, 0, SCSI_SNSCODE_WRITE_ERROR_AUTO_REALLOCATION_FAILED, satIOContext); ostiInitiatorIOCompleted( tiRoot, tiIORequest, tiIOSuccess, SCSI_STAT_CHECK_CONDITION, satIOContext->pTiSenseData, satIOContext->interruptContext ); return tiSuccess; } TI_DBG6(("satWrite10: case 5\n")); /* Support 48-bit FPDMA addressing, use WRITE FPDMA QUEUE command */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_WRITE_FPDMA_QUEUED; /* 0x61 */ fis->h.features = 1; /* FIS sector count (7:0) */ fis->d.lbaLow = LBA[7]; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = LBA[6]; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = LBA[5]; /* FIS LBA (23:16) */ /* Check FUA bit */ fis->d.device = 0x40; /* FIS FUA clear */ fis->d.lbaLowExp = LBA[4]; /* FIS LBA (31:24) */ fis->d.lbaMidExp = LBA[3]; /* FIS LBA (39:32) */ fis->d.lbaHighExp = LBA[2]; /* FIS LBA (47:40) */ fis->d.featuresExp = 0; /* FIS sector count (15:8) */ fis->d.sectorCount = 0; /* Tag (7:3) set by LL layer */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_FPDMA_WRITE; satIOContext->ATACmd = SAT_WRITE_FPDMA_QUEUED; } satIOContext->satCompleteCB = &satReassignBlocksCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, /* not the original, should be the TD generated one */ tiScsiRequest, satIOContext); return (status); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI satPrepareNewIO. * * This function fills in the fields of internal IO generated by TD layer. * This is mostly used in the callback functions. * * \param satNewIntIo: Pointer to the internal IO structure. 
 *  \param   tiOrgIORequest:   Pointer to the original tiIOrequest sent by OS layer
 *  \param   satDevData:       Pointer to the device data.
 *  \param   scsiCmnd:         Pointer to SCSI command.
 *  \param   satOrgIOContext:  Pointer to the original SAT IO Context
 *
 *  \return
 *    - \e Pointer to the new SAT IO Context
 */
/*****************************************************************************/
GLOBAL satIOContext_t *satPrepareNewIO(
                            satInternalIo_t         *satNewIntIo,
                            tiIORequest_t           *tiOrgIORequest,
                            satDeviceData_t         *satDevData,
                            tiIniScsiCmnd_t         *scsiCmnd,
                            satIOContext_t          *satOrgIOContext
                            )
{
  satIOContext_t          *satNewIOContext;
  tdIORequestBody_t       *tdNewIORequestBody;

  TI_DBG2(("satPrepareNewIO: start\n"));

  /* the one to be used; good 8/2/07 */
  /* link the internal IO back to the original OS-level request */
  satNewIntIo->satOrgTiIORequest = tiOrgIORequest; /* this is already done in
                                                      satAllocIntIoResource() */

  tdNewIORequestBody = (tdIORequestBody_t *)satNewIntIo->satIntRequestBody;
  satNewIOContext = &(tdNewIORequestBody->transport.SATA.satIOContext);

  /* wire the new context to the new request body's FIS / cmnd storage */
  satNewIOContext->pSatDevData   = satDevData;
  satNewIOContext->pFis          =
    &(tdNewIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
  satNewIOContext->pScsiCmnd     = &(satNewIntIo->satIntTiScsiXchg.scsiCmnd);
  if (scsiCmnd != agNULL)
  {
    /* saves only CBD; not scsi command for LBA and number of blocks */
    osti_memcpy(satNewIOContext->pScsiCmnd->cdb, scsiCmnd->cdb, 16);
  }
  satNewIOContext->pSense        = &(tdNewIORequestBody->transport.SATA.sensePayload);
  satNewIOContext->pTiSenseData  = &(tdNewIORequestBody->transport.SATA.tiSenseData);
  satNewIOContext->pTiSenseData->senseData = satNewIOContext->pSense;
  satNewIOContext->tiRequestBody = satNewIntIo->satIntRequestBody;
  /* NOTE(review): self-assignment — presumably this was meant to copy
     satOrgIOContext->interruptContext; confirm against the original driver
     before changing */
  satNewIOContext->interruptContext = satNewIOContext->interruptContext;
  satNewIOContext->satIntIoContext  = satNewIntIo;
  satNewIOContext->ptiDeviceHandle  = satOrgIOContext->ptiDeviceHandle;
  satNewIOContext->satOrgIOContext  = satOrgIOContext;
  /* saves tiScsiXchg; only for writesame10() */
  satNewIOContext->tiScsiXchg = satOrgIOContext->tiScsiXchg;

  return satNewIOContext;
}

/*****************************************************************************
 *! \brief  satIOAbort
 *
 *   This routine is called to initiate a I/O abort to SATL.
 *   This routine is independent of HW/LL API.
 *
 *  \param  tiRoot:     Pointer to TISA initiator driver/port instance.
 *  \param  taskTag:    Pointer to TISA I/O request context/tag to be aborted.
 *
 *  \return:
 *
 *  \e tiSuccess:     I/O request successfully initiated.
 *  \e tiBusy:        No resources available, try again later.
 *  \e tiError:       Other errors that prevent the I/O request to be started.
 *
 *
 *****************************************************************************/
GLOBAL bit32 satIOAbort(
                          tiRoot_t      *tiRoot,
                          tiIORequest_t *taskTag )
{
  tdsaRoot_t          *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t       *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  agsaRoot_t          *agRoot;
  tdIORequestBody_t   *tdIORequestBody;
  tdIORequestBody_t   *tdIONewRequestBody;
  agsaIORequest_t     *agIORequest;
  bit32               status;
  agsaIORequest_t     *agAbortIORequest;
  tdIORequestBody_t   *tdAbortIORequestBody;
  bit32               PhysUpper32;
  bit32               PhysLower32;
  bit32               memAllocStatus;
  void                *osMemHandle;
  satIOContext_t      *satIOContext;
  satInternalIo_t     *satIntIo;

  TI_DBG2(("satIOAbort: start\n"));

  agRoot          = &(tdsaAllShared->agRootNonInt);
  tdIORequestBody = (tdIORequestBody_t *)taskTag->tdData;

  /* needs to distinguish internally generated or externally generated */
  satIOContext = &(tdIORequestBody->transport.SATA.satIOContext);
  satIntIo     = satIOContext->satIntIoContext;
  if (satIntIo == agNULL)
  {
    /* OS-originated I/O: abort the request body attached to the task tag */
    TI_DBG1(("satIOAbort: External, OS generated\n"));
    agIORequest     = &(tdIORequestBody->agIORequest);
  }
  else
  {
    /* TD-internal I/O: abort the internally allocated request body */
    TI_DBG1(("satIOAbort: Internal, TD generated\n"));
    tdIONewRequestBody = (tdIORequestBody_t *)satIntIo->satIntRequestBody;
    agIORequest     = &(tdIONewRequestBody->agIORequest);
  }

  /* allocating agIORequest for abort itself */
  memAllocStatus = ostiAllocMemory(
                                   tiRoot,
                                   &osMemHandle,
                                   (void **)&tdAbortIORequestBody,
                                   &PhysUpper32,
                                   &PhysLower32,
                                   8,
                                   sizeof(tdIORequestBody_t),
                                   agTRUE
                                   );

  if (memAllocStatus != tiSuccess)
  {
    /* let os process IO */
    TI_DBG1(("satIOAbort: ostiAllocMemory failed...\n"));
    return tiError;
  }

  if (tdAbortIORequestBody == agNULL)
  {
    /* let os process IO */
    TI_DBG1(("satIOAbort: ostiAllocMemory returned NULL tdAbortIORequestBody\n"));
    return tiError;
  }

  /* setup task management structure */
  tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
  tdAbortIORequestBody->tiDevHandle = tdIORequestBody->tiDevHandle;

  /* initialize agIORequest */
  agAbortIORequest = &(tdAbortIORequestBody->agIORequest);
  agAbortIORequest->osData = (void *) tdAbortIORequestBody;
  agAbortIORequest->sdkData = agNULL; /* LL takes care of this */

  /* remember IO to be aborted */
  tdAbortIORequestBody->tiIOToBeAbortedRequest = taskTag;

  status = saSATAAbort( agRoot, agAbortIORequest, 0, agNULL, 0, agIORequest, agNULL );

  TI_DBG5(("satIOAbort: return status=0x%x\n", status));

  if (status == AGSA_RC_SUCCESS)
    return tiSuccess;
  else
    return tiError;
}

/*****************************************************************************
 *! \brief  satTM
 *
 *   This routine is called to initiate a TM request to SATL.
 *   This routine is independent of HW/LL API.
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  task:            SAM-3 task management request.
 *  \param  lun:             Pointer to LUN.
 *  \param  taskTag:         Pointer to the associated task where the TM
 *                           command is to be applied.
 *  \param  currentTaskTag:  Pointer to tag/context for this TM request.
 *
 *  \return:
 *
 *  \e tiSuccess:     I/O request successfully initiated.
 *  \e tiBusy:        No resources available, try again later.
 *  \e tiIONoDevice:  Invalid device handle.
 *  \e tiError:       Other errors that prevent the I/O request to be started.
 *
 *
 *****************************************************************************/
/* save task in satIOContext */
osGLOBAL bit32 satTM(
                    tiRoot_t          *tiRoot,
                    tiDeviceHandle_t  *tiDeviceHandle,
                    bit32             task,
                    tiLUN_t           *lun,
                    tiIORequest_t     *taskTag,
                    tiIORequest_t     *currentTaskTag,
                    tdIORequestBody_t *tiRequestBody,
                    bit32              NotifyOS
                    )
{
  /* TM dispatcher: initializes a SAT IO context for the task-management
     request, then routes to the per-task helper (LUN reset, abort task,
     internal reset); unsupported tasks free the request body and fail */
  tdIORequestBody_t           *tdIORequestBody = agNULL;
  satIOContext_t              *satIOContext = agNULL;
  tdsaDeviceData_t            *oneDeviceData = agNULL;
  bit32                       status;

  TI_DBG3(("satTM: tiDeviceHandle=%p task=0x%x\n", tiDeviceHandle, task ));

  /* set satIOContext fields and etc */
  oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;

  tdIORequestBody = (tdIORequestBody_t *)tiRequestBody;
  satIOContext = &(tdIORequestBody->transport.SATA.satIOContext);

  satIOContext->pSatDevData   = &oneDeviceData->satDevData;
  satIOContext->pFis          =
    &tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev;

  satIOContext->tiRequestBody = tiRequestBody;
  satIOContext->ptiDeviceHandle = tiDeviceHandle;
  satIOContext->satIntIoContext  = agNULL;
  satIOContext->satOrgIOContext  = agNULL;

  /* followings are used only for internal IO */
  satIOContext->currentLBA = 0;
  satIOContext->OrgTL = 0;

  /* saving task in satIOContext */
  satIOContext->TMF = task;

  satIOContext->satToBeAbortedIOContext = agNULL;

  if (NotifyOS == agTRUE)
  {
    satIOContext->NotifyOS = agTRUE;
  }
  else
  {
    satIOContext->NotifyOS = agFALSE;
  }
  /*
   * Our SAT supports RESET LUN and partially support ABORT TASK (only if there
   * is no more than one I/O pending on the drive.
   */

  if (task == AG_LOGICAL_UNIT_RESET)
  {
    status = satTmResetLUN( tiRoot,
                            currentTaskTag,
                            tiDeviceHandle,
                            agNULL,
                            satIOContext,
                            lun);

    return status;
  }
#ifdef TO_BE_REMOVED
  else if (task == AG_TARGET_WARM_RESET)
  {
    status = satTmWarmReset( tiRoot,
                             currentTaskTag,
                             tiDeviceHandle,
                             agNULL,
                             satIOContext);

    return status;
  }
#endif
  else if (task == AG_ABORT_TASK)
  {
    status = satTmAbortTask( tiRoot,
                             currentTaskTag,
                             tiDeviceHandle,
                             agNULL,
                             satIOContext,
                             taskTag);

    return status;
  }
  else if (task == TD_INTERNAL_TM_RESET)
  {
    status = satTDInternalTmReset( tiRoot,
                                   currentTaskTag,
                                   tiDeviceHandle,
                                   agNULL,
                                   satIOContext);
    return status;
  }
  else
  {
    TI_DBG1(("satTM: tiDeviceHandle=%p UNSUPPORTED TM task=0x%x\n",
             tiDeviceHandle, task ));

    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tiRequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    return tiError;
  }
}

/*****************************************************************************
 *! \brief  satTmResetLUN
 *
 *   This routine is called to initiate a TM RESET LUN request to SATL.
 *   This routine is independent of HW/LL API.
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  lun:             Pointer to LUN.
 *  \param  currentTaskTag:  Pointer to tag/context for this TM request.
 *
 *  \return:
 *
 *  \e tiSuccess:     I/O request successfully initiated.
 *  \e tiBusy:        No resources available, try again later.
 *  \e tiIONoDevice:  Invalid device handle.
 *  \e tiError:       Other errors that prevent the I/O request to be started.
 *
 *
 *****************************************************************************/
osGLOBAL bit32 satTmResetLUN(
                            tiRoot_t                  *tiRoot,
                            tiIORequest_t             *tiIORequest, /* current task tag */
                            tiDeviceHandle_t          *tiDeviceHandle,
                            tiScsiInitiatorRequest_t  *tiScsiRequest,
                            satIOContext_t            *satIOContext,
                            tiLUN_t                   *lun)
{
  tdsaDeviceData_t  *tdsaDeviceData;
  satDeviceData_t   *satDevData;

  tdsaDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
  satDevData     = &tdsaDeviceData->satDevData;

  TI_DBG1(("satTmResetLUN: tiDeviceHandle=%p.\n", tiDeviceHandle ));

  /*
   * Only support LUN 0
   */
  if ( (lun->lun[0] | lun->lun[1] | lun->lun[2] | lun->lun[3] |
        lun->lun[4] | lun->lun[5] | lun->lun[6] | lun->lun[7] ) != 0 )
  {
    TI_DBG1(("satTmResetLUN: *** REJECT *** LUN not zero, tiDeviceHandle=%p\n",
             tiDeviceHandle));
    return tiError;
  }

  /*
   * Check if there is other TM request pending
   */
  if (satDevData->satTmTaskTag != agNULL)
  {
    TI_DBG1(("satTmResetLUN: *** REJECT *** other TM pending, tiDeviceHandle=%p\n",
             tiDeviceHandle));
    return tiError;
  }

  /*
   * Save tiIORequest, will be returned at device reset completion to return
   * the TM completion.
   */
  satDevData->satTmTaskTag = tiIORequest;

  /*
   * Set flag to indicate device in recovery mode.
   */
  satDevData->satDriveState = SAT_DEV_STATE_IN_RECOVERY;

  /*
   * Issue SATA device reset. Set flag to indicate NOT to automatically abort
   * at the completion of SATA device reset.
   */
  satDevData->satAbortAfterReset = agFALSE;

  /* SAT rev8 6.3.6 p22 */
  satStartResetDevice(
                      tiRoot,
                      tiIORequest, /* currentTaskTag */
                      tiDeviceHandle,
                      tiScsiRequest,
                      satIOContext
                      );

  return tiSuccess;
}

/*****************************************************************************
 *! \brief  satTmWarmReset
 *
 *   This routine is called to initiate a TM warm RESET request to SATL.
 *   This routine is independent of HW/LL API.
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
* \param currentTaskTag: Pointer to tag/context for this TM request. * * \return: * * \e tiSuccess: I/O request successfully initiated. * \e tiBusy: No resources available, try again later. * \e tiIONoDevice: Invalid device handle. * \e tiError: Other errors that prevent the I/O request to be started. * * *****************************************************************************/ osGLOBAL bit32 satTmWarmReset( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, /* current task tag */ tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { tdsaDeviceData_t *tdsaDeviceData; satDeviceData_t *satDevData; tdsaDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; satDevData = &tdsaDeviceData->satDevData; TI_DBG1(("satTmWarmReset: tiDeviceHandle=%p.\n", tiDeviceHandle )); /* * Check if there is other TM request pending */ if (satDevData->satTmTaskTag != agNULL) { TI_DBG1(("satTmWarmReset: *** REJECT *** other TM pending, tiDeviceHandle=%p\n", tiDeviceHandle)); return tiError; } /* * Save tiIORequest, will be returned at device reset completion to return * the TM completion. */ satDevData->satTmTaskTag = tiIORequest; /* * Set flag to indicate device in recovery mode. */ satDevData->satDriveState = SAT_DEV_STATE_IN_RECOVERY; /* * Issue SATA device reset. Set flag to indicate NOT to automatically abort * at the completion of SATA device reset. 
*/ satDevData->satAbortAfterReset = agFALSE; /* SAT rev8 6.3.6 p22 */ satStartResetDevice( tiRoot, tiIORequest, /* currentTaskTag */ tiDeviceHandle, tiScsiRequest, satIOContext ); return tiSuccess; } osGLOBAL bit32 satTDInternalTmReset( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, /* current task tag */ tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { tdsaDeviceData_t *tdsaDeviceData; satDeviceData_t *satDevData; tdsaDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData; satDevData = &tdsaDeviceData->satDevData; TI_DBG1(("satTmWarmReset: tiDeviceHandle=%p.\n", tiDeviceHandle )); /* * Check if there is other TM request pending */ if (satDevData->satTmTaskTag != agNULL) { TI_DBG1(("satTmWarmReset: *** REJECT *** other TM pending, tiDeviceHandle=%p\n", tiDeviceHandle)); return tiError; } /* * Save tiIORequest, will be returned at device reset completion to return * the TM completion. */ satDevData->satTmTaskTag = tiIORequest; /* * Set flag to indicate device in recovery mode. */ satDevData->satDriveState = SAT_DEV_STATE_IN_RECOVERY; /* * Issue SATA device reset. Set flag to indicate NOT to automatically abort * at the completion of SATA device reset. */ satDevData->satAbortAfterReset = agFALSE; /* SAT rev8 6.3.6 p22 */ satStartResetDevice( tiRoot, tiIORequest, /* currentTaskTag */ tiDeviceHandle, tiScsiRequest, satIOContext ); return tiSuccess; } /***************************************************************************** *! \brief satTmAbortTask * * This routine is called to initiate a TM ABORT TASK request to SATL. * This routine is independent of HW/LL API. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param taskTag: Pointer to the associated task where the TM * command is to be applied. * \param currentTaskTag: Pointer to tag/context for this TM request. 
 *
 *  \return:
 *
 *  \e tiSuccess:     I/O request successfully initiated.
 *  \e tiBusy:        No resources available, try again later.
 *  \e tiIONoDevice:  Invalid device handle.
 *  \e tiError:       Other errors that prevent the I/O request to be started.
 *
 *
 *****************************************************************************/
osGLOBAL bit32 satTmAbortTask(
                            tiRoot_t                  *tiRoot,
                            tiIORequest_t             *tiIORequest, /* current task tag */
                            tiDeviceHandle_t          *tiDeviceHandle,
                            tiScsiInitiatorRequest_t  *tiScsiRequest, /* NULL */
                            satIOContext_t            *satIOContext,
                            tiIORequest_t             *taskTag)
{
  tdsaDeviceData_t  *tdsaDeviceData;
  satDeviceData_t   *satDevData;
  satIOContext_t    *satTempIOContext = agNULL;
  tdIORequestBody_t *tdIORequestBody;
  tdIORequestBody_t *TMtdIORequestBody;
  tdList_t          *elementHdr;
  bit32             found = agFALSE;
  tiIORequest_t     *tiIOReq;

  tdsaDeviceData    = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
  satDevData        = &tdsaDeviceData->satDevData;
  TMtdIORequestBody = (tdIORequestBody_t *)tiIORequest->tdData;

  TI_DBG1(("satTmAbortTask: tiDeviceHandle=%p taskTag=%p.\n", tiDeviceHandle, taskTag ));

  /*
   * Check if there is other TM request pending
   */
  if (satDevData->satTmTaskTag != agNULL)
  {
    TI_DBG1(("satTmAbortTask: REJECT other TM pending, tiDeviceHandle=%p\n",
             tiDeviceHandle));
    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    return tiError;
  }
#ifdef REMOVED
  /*
   * Check if there is only one I/O pending.
   */
  if (satDevData->satPendingIO > 0)
  {
    TI_DBG1(("satTmAbortTask: REJECT num pending I/O, tiDeviceHandle=%p, satPendingIO=0x%x\n",
             tiDeviceHandle, satDevData->satPendingIO));
    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    return tiError;
  }
#endif

  /*
   * Check that the only pending I/O matches taskTag. If not return tiError.
   */
  /* walk the device's pending-I/O list looking for the tag to abort */
  elementHdr = satDevData->satIoLinkList.flink;

  while (elementHdr != &satDevData->satIoLinkList)
  {
    satTempIOContext = TDLIST_OBJECT_BASE( satIOContext_t,
                                           satIoContextLink,
                                           elementHdr );

    tdIORequestBody = (tdIORequestBody_t *) satTempIOContext->tiRequestBody;
    tiIOReq         = tdIORequestBody->tiIORequest;

    elementHdr = elementHdr->flink;   /* for the next while loop  */

    /*
     * Check if the tag matches
     */
    if ( tiIOReq == taskTag)
    {
      found = agTRUE;
      satIOContext->satToBeAbortedIOContext = satTempIOContext;
      TI_DBG1(("satTmAbortTask: found matching tag.\n"));

      break;

    } /* if matching tag */

  } /* while loop */

  if (found == agFALSE )
  {
    TI_DBG1(("satTmAbortTask: *** REJECT *** no match, tiDeviceHandle=%p\n",
             tiDeviceHandle ));

    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    return tiError;
  }

  /*
   * Save tiIORequest, will be returned at device reset completion to return
   * the TM completion.
   */
  satDevData->satTmTaskTag = tiIORequest;

  /*
   * Set flag to indicate device in recovery mode.
   */
  satDevData->satDriveState = SAT_DEV_STATE_IN_RECOVERY;

  /*
   * Issue SATA device reset or check power mode. Set flag to to automatically abort
   * at the completion of SATA device reset.
   * SAT r09 p25
   */
  satDevData->satAbortAfterReset = agTRUE;

  /* NCQ commands are aborted via CHECK POWER MODE; everything else via SRST */
  if ( (satTempIOContext->reqType == AGSA_SATA_PROTOCOL_FPDMA_WRITE) ||
       (satTempIOContext->reqType == AGSA_SATA_PROTOCOL_FPDMA_READ) )
  {
    TI_DBG1(("satTmAbortTask: calling satStartCheckPowerMode\n"));
    /* send check power mode */
    satStartCheckPowerMode(
                           tiRoot,
                           tiIORequest, /* currentTaskTag */
                           tiDeviceHandle,
                           tiScsiRequest,
                           satIOContext
                           );
  }
  else
  {
    TI_DBG1(("satTmAbortTask: calling satStartResetDevice\n"));
    /* send AGSA_SATA_PROTOCOL_SRST_ASSERT */
    satStartResetDevice(
                        tiRoot,
                        tiIORequest, /* currentTaskTag */
                        tiDeviceHandle,
                        tiScsiRequest,
                        satIOContext
                        );
  }

  return tiSuccess;
}

/*****************************************************************************
 *!
 \brief  osSatResetCB
 *
 *   This routine is called to notify the completion of SATA device reset
 *   which was initiated previously through the call to sataLLReset().
 *   This routine is independent of HW/LL API.
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  resetStatus:     Reset status either tiSuccess or tiError.
 *  \param  respFis:         Pointer to the Register Device-To-Host FIS
 *                           received from the device.
 *
 *  \return: None
 *
 *****************************************************************************/
osGLOBAL void osSatResetCB(
                tiRoot_t          *tiRoot,
                tiDeviceHandle_t  *tiDeviceHandle,
                bit32             resetStatus,
                void              *respFis)
{
  agsaRoot_t          *agRoot;
  tdsaDeviceData_t    *tdsaDeviceData;
  satDeviceData_t     *satDevData;
  satIOContext_t      *satIOContext;
  tdIORequestBody_t   *tdIORequestBodyTmp;
  tdList_t            *elementHdr;
  agsaIORequest_t     *agAbortIORequest;
  tdIORequestBody_t   *tdAbortIORequestBody;
  bit32               PhysUpper32;
  bit32               PhysLower32;
  bit32               memAllocStatus;
  void                *osMemHandle;

  tdsaDeviceData  = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
  agRoot          = tdsaDeviceData->agRoot;
  satDevData      = &tdsaDeviceData->satDevData;

  TI_DBG5(("osSatResetCB: tiDeviceHandle=%p resetStatus=0x%x\n",
           tiDeviceHandle, resetStatus ));

  /* We may need to check FIS to check device operating condition */

  /*
   * Check if need to abort all pending I/Os
   */
  if ( satDevData->satAbortAfterReset == agTRUE )
  {
    /*
     * Issue abort to LL layer to all other pending I/Os for the same SATA drive
     */
    elementHdr = satDevData->satIoLinkList.flink;
    while (elementHdr != &satDevData->satIoLinkList)
    {
      satIOContext = TDLIST_OBJECT_BASE( satIOContext_t,
                                         satIoContextLink,
                                         elementHdr );

      tdIORequestBodyTmp = (tdIORequestBody_t *)satIOContext->tiRequestBody;

      /*
       * Issue abort
       */
      TI_DBG5(("osSatResetCB: issuing ABORT tiDeviceHandle=%p agIORequest=%p\n",
               tiDeviceHandle, &tdIORequestBodyTmp->agIORequest ));

      /* allocating agIORequest for abort itself; one fresh request body per
         pending I/O being aborted */
      memAllocStatus = ostiAllocMemory(
                                       tiRoot,
                                       &osMemHandle,
                                       (void **)&tdAbortIORequestBody,
                                       &PhysUpper32,
                                       &PhysLower32,
                                       8,
                                       sizeof(tdIORequestBody_t),
                                       agTRUE
                                       );

      if (memAllocStatus != tiSuccess)
      {
        /* let os process IO */
        /* NOTE(review): returning here abandons the rest of the abort walk
           and leaves satAbortAfterReset/satTmTaskTag set — confirm intended */
        TI_DBG1(("osSatResetCB: ostiAllocMemory failed...\n"));
        return;
      }

      if (tdAbortIORequestBody == agNULL)
      {
        /* let os process IO */
        TI_DBG1(("osSatResetCB: ostiAllocMemory returned NULL tdAbortIORequestBody\n"));
        return;
      }

      /* setup task management structure */
      tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
      tdAbortIORequestBody->tiDevHandle = tiDeviceHandle;

      /* initialize agIORequest */
      agAbortIORequest = &(tdAbortIORequestBody->agIORequest);
      agAbortIORequest->osData = (void *) tdAbortIORequestBody;
      agAbortIORequest->sdkData = agNULL; /* LL takes care of this */

      saSATAAbort( agRoot, agAbortIORequest, 0, agNULL, 0,
                   &(tdIORequestBodyTmp->agIORequest), agNULL );

      elementHdr = elementHdr->flink;   /* for the next while loop  */

    } /* while */

    /* Reset flag */
    satDevData->satAbortAfterReset = agFALSE;

  }

  /*
   * Check if the device reset if the result of TM request.
   */
  if ( satDevData->satTmTaskTag != agNULL )
  {
    TI_DBG5(("osSatResetCB: calling TM completion tiDeviceHandle=%p satTmTaskTag=%p\n",
             tiDeviceHandle, satDevData->satTmTaskTag ));

    /* report TM success to the OS layer */
    ostiInitiatorEvent( tiRoot,
                        agNULL,               /* portalContext not used */
                        tiDeviceHandle,
                        tiIntrEventTypeTaskManagement,
                        tiTMOK,
                        satDevData->satTmTaskTag);
    /*
     * Reset flag
     */
    satDevData->satTmTaskTag = agNULL;
  }

}

/*****************************************************************************
 *! \brief  osSatIOCompleted
 *
 *   This routine is a callback for SATA completion that required FIS status
 *   translation to SCSI status.
 *
 *  \param   tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:     Pointer to TISA I/O request context for this I/O.
 *  \param   respFis:         Pointer to status FIS to read.
 *  \param   respFisLen:      Length of response FIS to read.
 *  \param   satIOContext:    Pointer to SAT context.
 * \param interruptContext:     Interrupt context
 *
 * \return: None
 *
 *****************************************************************************/
osGLOBAL void  osSatIOCompleted(
                          tiRoot_t           *tiRoot,
                          tiIORequest_t      *tiIORequest,
                          agsaFisHeader_t    *agFirstDword,
                          bit32              respFisLen,
                          agsaFrameHandle_t  agFrameHandle,
                          satIOContext_t     *satIOContext,
                          bit32              interruptContext)
{
  satDeviceData_t           *pSatDevData;
  scsiRspSense_t            *pSense;
#ifdef  TD_DEBUG_ENABLE
  tiIniScsiCmnd_t           *pScsiCmnd;
#endif
  agsaFisRegHostToDevice_t  *hostToDevFis = agNULL;
  bit32                     ataStatus = 0;
  bit32                     ataError;
  satInternalIo_t           *satIntIo = agNULL;
  bit32                     status;
  tiDeviceHandle_t          *tiDeviceHandle;
  satIOContext_t            *satIOContext2;
  tdIORequestBody_t         *tdIORequestBody;
  agsaFisRegD2HHeader_t     *statDevToHostFisHeader = agNULL;
  agsaFisSetDevBitsHeader_t *statSetDevBitFisHeader = agNULL;
  tiIORequest_t             tiIORequestTMP;

  /* Unpack the SAT I/O context that accompanies the completed request. */
  pSense          = satIOContext->pSense;
  pSatDevData     = satIOContext->pSatDevData;
#ifdef  TD_DEBUG_ENABLE
  pScsiCmnd       = satIOContext->pScsiCmnd;
#endif
  hostToDevFis    = satIOContext->pFis;

  tiDeviceHandle  = &((tdsaDeviceData_t *)(pSatDevData->satSaDeviceData))->tiDeviceHandle;

  /*
   * Find out the type of response FIS:
   * Set Device Bits FIS or Register Device To Host FIS.
   */

  /* First assume it is Reg Device to Host FIS */
  statDevToHostFisHeader = (agsaFisRegD2HHeader_t *)&(agFirstDword->D2H);
  ataStatus     = statDevToHostFisHeader->status;   /* ATA Status register */
  ataError      = statDevToHostFisHeader->error;    /* ATA Error register */

  /* for debugging */
  TI_DBG1(("osSatIOCompleted: H to D command 0x%x\n", hostToDevFis->h.command));
  TI_DBG1(("osSatIOCompleted: D to H fistype 0x%x\n", statDevToHostFisHeader->fisType));

  if (statDevToHostFisHeader->fisType == SET_DEV_BITS_FIS)
  {
    /* It is a Set Device Bits FIS: status bits are split across the
     * statusHi_Lo byte (bits 4-6 high nibble, bits 0-2 low nibble). */
    statSetDevBitFisHeader = (agsaFisSetDevBitsHeader_t *)&(agFirstDword->D2H);

    /* Get ATA Status register */
    ataStatus = (statSetDevBitFisHeader->statusHi_Lo & 0x70);               /* bits 4,5,6 */
    ataStatus = ataStatus | (statSetDevBitFisHeader->statusHi_Lo & 0x07);   /* bits 0,1,2 */

    /* ATA Error register */
    ataError  = statSetDevBitFisHeader->error;

    statDevToHostFisHeader = agNULL;
  }
  else if (statDevToHostFisHeader->fisType != REG_DEV_TO_HOST_FIS)
  {
    /* Unrecognized response FIS: fail the I/O back to the OS layer with
     * CHECK CONDITION / hardware-error sense data. */
    TI_DBG1(("osSatIOCompleted: *** UNEXPECTED RESP FIS TYPE 0x%x *** tiIORequest=%p\n",
             statDevToHostFisHeader->fisType, tiIORequest));

    satSetSensePayload( pSense,
                        SCSI_SNSKEY_HARDWARE_ERROR,
                        0,
                        SCSI_SNSCODE_INTERNAL_TARGET_FAILURE,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              interruptContext );
    return;
  }

  /* Remember the device-fault bit so subsequent requests see drive state. */
  if ( ataStatus & DF_ATA_STATUS_MASK )
  {
    pSatDevData->satDeviceFaultState = agTRUE;
  }
  else
  {
    pSatDevData->satDeviceFaultState = agFALSE;
  }

  /* NOTE(review): pScsiCmnd is declared only under TD_DEBUG_ENABLE; this
   * trace presumably compiles away when debugging is disabled -- confirm. */
  TI_DBG5(("osSatIOCompleted: tiIORequest=%p CDB=0x%x ATA CMD =0x%x\n",
           tiIORequest, pScsiCmnd->cdb[0], hostToDevFis->h.command));

  /*
   * Decide which ATA command is the translation needed
   */
  switch(hostToDevFis->h.command)
  {
    case SAT_READ_FPDMA_QUEUED:
    case SAT_WRITE_FPDMA_QUEUED:
      /************************************************************************
       *
       * !!!! See Section 13.5.2.4 of SATA 2.5 specs.                     !!!!
       * !!!! If the NCQ error ends up here, it means that the device sent
       * !!!! a Set Device Bits FIS (which has the SActive register) instead
       * !!!! of a Register Device To Host FIS (which does not have SActive).
       * !!!! The callback ossaSATAEvent() deals with the case where a
       * !!!! Register Device To Host FIS was sent by the device.
       *
       * For NCQ we need to issue READ LOG EXT command with log page 10h
       * to get the error and to allow other I/Os to continue.
       *
       * Here is the basic flow or sequence of error recovery, note that due
       * to the SATA HW assist that we have, this sequence is slighly different
       * from the one described in SATA 2.5:
       *
       * 1. Set SATA device flag to indicate error condition and returning busy
       *    for all new request.
       *
       * 2. Because the HW/LL layer received Set Device Bit FIS, it can get the
       *    tag or I/O context for NCQ request, SATL would translate the ATA
       *    error to SCSI status and return the original NCQ I/O with the
       *    appopriate SCSI status.
       *
       * 3. Prepare READ LOG EXT page 10h command. Set flag to indicate that
       *    the failed I/O has been returned to the OS Layer. Send command.
       *
       * 4. When the device receives READ LOG EXT page 10h request all other
       *    pending I/O are implicitly aborted. No completion (aborted) status
       *    will be sent to the host for these aborted commands.
       *
       * 5. SATL receives the completion for READ LOG EXT command in
       *    satReadLogExtCB(). Steps 6,7,8,9 below are the step 1,2,3,4 in
       *    satReadLogExtCB().
       *
       * 6. Check flag that indicates whether the failed I/O has been returned
       *    to the OS Layer. If not, search the I/O context in device data
       *    looking for a matched tag. Then return the completion of the failed
       *    NCQ command with the appopriate/trasnlated SCSI status.
       *
       * 7. Issue abort to LL layer to all other pending I/Os for the same SATA
       *    drive.
       *
       * 8. Free resource allocated for the internally generated READ LOG EXT.
       *
       * 9. At the completion of abort, in the context of ossaSATACompleted(),
       *    return the I/O with error status to the OS-App Specific layer.
       *    When all I/O aborts are completed, clear SATA device flag to
       *    indicate ready to process new request.
       *
       ***********************************************************************/
      TI_DBG1(("osSatIOCompleted: NCQ ERROR tiIORequest=%p ataStatus=0x%x ataError=0x%x\n",
               tiIORequest, ataStatus, ataError ));

      /* Set flag to indicate we are in recovery */
      pSatDevData->satDriveState = SAT_DEV_STATE_IN_RECOVERY;

      /* Return the failed NCQ I/O to OS-Apps Specifiic layer (step 2 above). */
      osSatDefaultTranslation( tiRoot,
                               tiIORequest,
                               satIOContext,
                               pSense,
                               (bit8)ataStatus,
                               (bit8)ataError,
                               interruptContext );

      /*
       * Allocate resource for READ LOG EXT page 10h
       */
      satIntIo = satAllocIntIoResource( tiRoot,
                                        &(tiIORequestTMP), /* anything but NULL */
                                        pSatDevData,
                                        sizeof (satReadLogExtPage10h_t),
                                        satIntIo);
      if (satIntIo == agNULL)
      {
        TI_DBG1(("osSatIOCompleted: can't send RLE due to resource lack\n"));

        /* Abort I/O after completion of device reset */
        pSatDevData->satAbortAfterReset = agTRUE;
#ifdef NOT_YET
        /* needs further investigation */
        /* no report to OS layer */
        satSubTM(tiRoot,
                 tiDeviceHandle,
                 TD_INTERNAL_TM_RESET,
                 agNULL,
                 agNULL,
                 agNULL,
                 agFALSE);
#endif
        TI_DBG1(("osSatIOCompleted: calling saSATADeviceReset 1\n"));
        return;
      }

      /*
       * Set flag to indicate that the failed I/O has been returned to the
       * OS-App specific Layer (checked again in satReadLogExtCB()).
       */
      satIntIo->satIntFlag = AG_SAT_INT_IO_FLAG_ORG_IO_COMPLETED;

      /* compare to satPrepareNewIO() */
      /* Send READ LOG EXT page 10h command */

      /*
       * Need to initialize all the fields within satIOContext except
       * reqType and satCompleteCB which will be set depending on cmd.
       */
      tdIORequestBody = (tdIORequestBody_t *)satIntIo->satIntRequestBody;
      satIOContext2   = &(tdIORequestBody->transport.SATA.satIOContext);

      satIOContext2->pSatDevData   = pSatDevData;
      satIOContext2->pFis          =
        &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
      satIOContext2->pScsiCmnd     = &(satIntIo->satIntTiScsiXchg.scsiCmnd);
      satIOContext2->pSense        = &(tdIORequestBody->transport.SATA.sensePayload);
      satIOContext2->pTiSenseData  = &(tdIORequestBody->transport.SATA.tiSenseData);
      satIOContext2->pTiSenseData->senseData = satIOContext2->pSense;
      satIOContext2->tiRequestBody    = satIntIo->satIntRequestBody;
      satIOContext2->interruptContext = interruptContext;
      satIOContext2->satIntIoContext  = satIntIo;
      satIOContext2->ptiDeviceHandle  = tiDeviceHandle;
      satIOContext2->satOrgIOContext  = agNULL;
      satIOContext2->tiScsiXchg       = agNULL;

      status = satSendReadLogExt( tiRoot,
                                  &satIntIo->satIntTiIORequest,
                                  tiDeviceHandle,
                                  &satIntIo->satIntTiScsiXchg,
                                  satIOContext2);
      if (status != tiSuccess)
      {
        TI_DBG1(("osSatIOCompleted: can't send RLE due to LL api failure\n"));
        satFreeIntIoResource( tiRoot,
                              pSatDevData,
                              satIntIo);
        /* Abort I/O after completion of device reset */
        pSatDevData->satAbortAfterReset = agTRUE;
#ifdef NOT_YET
        /* needs further investigation */
        /* no report to OS layer */
        satSubTM(tiRoot,
                 tiDeviceHandle,
                 TD_INTERNAL_TM_RESET,
                 agNULL,
                 agNULL,
                 agNULL,
                 agFALSE);
#endif
        TI_DBG1(("osSatIOCompleted: calling saSATADeviceReset 2\n"));
        return;
      }
      break;

    case SAT_READ_DMA_EXT:
      /* fall through */
      /* Use default status/error translation */
    case SAT_READ_DMA:
      /* fall through */
      /* Use default status/error translation */
    default:
      osSatDefaultTranslation( tiRoot,
                               tiIORequest,
                               satIOContext,
                               pSense,
                               (bit8)ataStatus,
                               (bit8)ataError,
                               interruptContext );
      break;

  } /* end switch */
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI STANDARD INQUIRY.
 *
 *  SAT implementation for SCSI STANDARD INQUIRY.
 *
 * \param pInquiry:         Pointer to Inquiry Data buffer.
 * \param pSATAIdData:      Pointer to ATA IDENTIFY DEVICE data.
 *
 * \return None.
 */
/*****************************************************************************/
GLOBAL void  satInquiryStandard(
                    bit8                    *pInquiry,
                    agsaSATAIdentifyData_t  *pSATAIdData,
                    tiIniScsiCmnd_t         *scsiCmnd
                    )
{
  tiLUN_t       *pLun;
  pLun          = &scsiCmnd->lun;

  /* Assumption: Basic Task Mangement is supported -> BQUE 1 and CMDQUE 0, SPC-4, Table96, p147 */
  /* See SPC-4, 6.4.2, p 143 and SAT revision 8, 8.1.2, p 28 */
  TI_DBG5(("satInquiryStandard: start\n"));

  if (pInquiry == agNULL)
  {
    TI_DBG1(("satInquiryStandard: pInquiry is NULL, wrong\n"));
    return;
  }
  else
  {
    TI_DBG5(("satInquiryStandard: pInquiry is NOT NULL\n"));
  }
  /*
   * Reject all other LUN other than LUN 0.
   */
  if ( ((pLun->lun[0] | pLun->lun[1] | pLun->lun[2] | pLun->lun[3] |
         pLun->lun[4] | pLun->lun[5] | pLun->lun[6] | pLun->lun[7] ) != 0) )
  {
    /* SAT Spec Table 8, p27, footnote 'a': peripheral qualifier 011b,
     * device type 1Fh -> "not capable of supporting this LU". */
    pInquiry[0] = 0x7F;
  }
  else
  {
    pInquiry[0] = 0x00;   /* direct-access block device, LU accessible */
  }

  /* RMB bit: removable medium, from identify-data word 0. */
  if (pSATAIdData->rm_ataDevice & ATA_REMOVABLE_MEDIA_DEVICE_MASK )
  {
    pInquiry[1] = 0x80;
  }
  else
  {
    pInquiry[1] = 0x00;
  }
  pInquiry[2] = 0x05;   /* SPC-3 */
  pInquiry[3] = 0x12;   /* set HiSup 1; resp data format set to 2 */
  pInquiry[4] = 0x1F;   /* 35 - 4 = 31; Additional length */
  pInquiry[5] = 0x00;
  /* The following two are for task management. SAT Rev8, p20 */
  if (pSATAIdData->sataCapabilities & 0x100)
  {
    /* NCQ supported; multiple outstanding SCSI IO are supported */
    pInquiry[6] = 0x00;   /* BQUE bit is not set */
    pInquiry[7] = 0x02;   /* CMDQUE bit is set */
  }
  else
  {
    pInquiry[6] = 0x80;   /* BQUE bit is set */
    pInquiry[7] = 0x00;   /* CMDQUE bit is not set */
  }
  /*
   * Vendor ID.
   */
  osti_strncpy((char*)&pInquiry[8], AG_SAT_VENDOR_ID_STRING,8);   /* 8 bytes */

  /*
   * Product ID: ATA model-number bytes arrive 16-bit word-swapped from the
   * LL layer, so each byte pair is exchanged while copying.
   */
  /* when flipped by LL */
  pInquiry[16] = pSATAIdData->modelNumber[1];
  pInquiry[17] = pSATAIdData->modelNumber[0];
  pInquiry[18] = pSATAIdData->modelNumber[3];
  pInquiry[19] = pSATAIdData->modelNumber[2];
  pInquiry[20] = pSATAIdData->modelNumber[5];
  pInquiry[21] = pSATAIdData->modelNumber[4];
  pInquiry[22] = pSATAIdData->modelNumber[7];
  pInquiry[23] = pSATAIdData->modelNumber[6];
  pInquiry[24] = pSATAIdData->modelNumber[9];
  pInquiry[25] = pSATAIdData->modelNumber[8];
  pInquiry[26] = pSATAIdData->modelNumber[11];
  pInquiry[27] = pSATAIdData->modelNumber[10];
  pInquiry[28] = pSATAIdData->modelNumber[13];
  pInquiry[29] = pSATAIdData->modelNumber[12];
  pInquiry[30] = pSATAIdData->modelNumber[15];
  pInquiry[31] = pSATAIdData->modelNumber[14];
  /* when flipped */

  /*
   * Product Revision level.
   */

  /*
   * If the IDENTIFY DEVICE data received in words 25 and 26 from the ATA
   * device are ASCII spaces (20h), do this translation.
   */
  if ( (pSATAIdData->firmwareVersion[4] == 0x20 ) &&
       (pSATAIdData->firmwareVersion[5] == 0x00 ) &&
       (pSATAIdData->firmwareVersion[6] == 0x20 ) &&
       (pSATAIdData->firmwareVersion[7] == 0x00 )
       )
  {
    pInquiry[32] = pSATAIdData->firmwareVersion[1];
    pInquiry[33] = pSATAIdData->firmwareVersion[0];
    pInquiry[34] = pSATAIdData->firmwareVersion[3];
    pInquiry[35] = pSATAIdData->firmwareVersion[2];
  }
  else
  {
    pInquiry[32] = pSATAIdData->firmwareVersion[5];
    pInquiry[33] = pSATAIdData->firmwareVersion[4];
    pInquiry[34] = pSATAIdData->firmwareVersion[7];
    pInquiry[35] = pSATAIdData->firmwareVersion[6];
  }

#ifdef REMOVED
  /* Historical non-swapped copy kept for reference; not compiled. */
  /*
   * Product ID
   */
  /* when flipped by LL */
  pInquiry[16] = pSATAIdData->modelNumber[0];
  pInquiry[17] = pSATAIdData->modelNumber[1];
  pInquiry[18] = pSATAIdData->modelNumber[2];
  pInquiry[19] = pSATAIdData->modelNumber[3];
  pInquiry[20] = pSATAIdData->modelNumber[4];
  pInquiry[21] = pSATAIdData->modelNumber[5];
  pInquiry[22] = pSATAIdData->modelNumber[6];
  pInquiry[23] = pSATAIdData->modelNumber[7];
  pInquiry[24] = pSATAIdData->modelNumber[8];
  pInquiry[25] = pSATAIdData->modelNumber[9];
  pInquiry[26] = pSATAIdData->modelNumber[10];
  pInquiry[27] = pSATAIdData->modelNumber[11];
  pInquiry[28] = pSATAIdData->modelNumber[12];
  pInquiry[29] = pSATAIdData->modelNumber[13];
  pInquiry[30] = pSATAIdData->modelNumber[14];
  pInquiry[31] = pSATAIdData->modelNumber[15];
  /* when flipped */

  /*
   * Product Revision level.
   */

  /*
   * If the IDENTIFY DEVICE data received in words 25 and 26 from the ATA
   * device are ASCII spaces (20h), do this translation.
   */
  if ( (pSATAIdData->firmwareVersion[4] == 0x20 ) &&
       (pSATAIdData->firmwareVersion[5] == 0x00 ) &&
       (pSATAIdData->firmwareVersion[6] == 0x20 ) &&
       (pSATAIdData->firmwareVersion[7] == 0x00 )
       )
  {
    pInquiry[32] = pSATAIdData->firmwareVersion[0];
    pInquiry[33] = pSATAIdData->firmwareVersion[1];
    pInquiry[34] = pSATAIdData->firmwareVersion[2];
    pInquiry[35] = pSATAIdData->firmwareVersion[3];
  }
  else
  {
    pInquiry[32] = pSATAIdData->firmwareVersion[4];
    pInquiry[33] = pSATAIdData->firmwareVersion[5];
    pInquiry[34] = pSATAIdData->firmwareVersion[6];
    pInquiry[35] = pSATAIdData->firmwareVersion[7];
  }
#endif

  TI_DBG5(("satInquiryStandard: end\n"));
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI INQUIRY page 0.
 *
 *  SAT implementation for SCSI INQUIRY page 0 (supported VPD pages).
 *
 *  \param   pInquiry:      Pointer to Inquiry Data buffer.
 *  \param   pSATAIdData:   Pointer to ATA IDENTIFY DEVICE data.
 *
 *  \return None.
 */
/*****************************************************************************/
GLOBAL void  satInquiryPage0(
                   bit8                    *pInquiry,
                   agsaSATAIdentifyData_t  *pSATAIdData)
{
  TI_DBG5(("satInquiryPage0: entry\n"));

  /*
   * See SPC-4, 7.6.9, p 345 and SAT revision 8, 10.3.2, p 77
   */
  pInquiry[0] = 0x00;
  pInquiry[1] = 0x00;     /* page code */
  pInquiry[2] = 0x00;     /* reserved */
  pInquiry[3] = 7 - 3;    /* last index(in this case, 6) - 3; page length */

  /* supported vpd page list */
  pInquiry[4] = 0x00;     /* page 0x00 supported */
  pInquiry[5] = 0x80;     /* page 0x80 supported */
  pInquiry[6] = 0x83;     /* page 0x83 supported */
  pInquiry[7] = 0x89;     /* page 0x89 supported */
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI INQUIRY page 83.
 *
 *  SAT implementation for SCSI INQUIRY page 83 (device identification).
 *
 *  \param   pInquiry:      Pointer to Inquiry Data buffer.
 *  \param   pSATAIdData:   Pointer to ATA IDENTIFY DEVICE data.
 *
 *  \return None.
*/ /*****************************************************************************/ GLOBAL void satInquiryPage83( bit8 *pInquiry, agsaSATAIdentifyData_t *pSATAIdData, satDeviceData_t *pSatDevData) { satSimpleSATAIdentifyData_t *pSimpleData; /* * When translating the fields, in some cases using the simple form of SATA * Identify Device Data is easier. So we define it here. * Both pSimpleData and pSATAIdData points to the same data. */ pSimpleData = ( satSimpleSATAIdentifyData_t *)pSATAIdData; TI_DBG5(("satInquiryPage83: entry\n")); pInquiry[0] = 0x00; pInquiry[1] = 0x83; /* page code */ pInquiry[2] = 0; /* Reserved */ /* * If the ATA device returns word 87 bit 8 set to one in its IDENTIFY DEVICE * data indicating that it supports the WORLD WIDE NAME field * (i.e., words 108-111), the SATL shall include an identification descriptor * containing a logical unit name. */ if ( pSatDevData->satWWNSupport) { /* Fill in SAT Rev8 Table85 */ /* * Logical unit name derived from the world wide name. */ pInquiry[3] = 12; /* 15-3; page length, no addition ID descriptor assumed*/ /* * Identifier descriptor */ pInquiry[4] = 0x01; /* Code set: binary codes */ pInquiry[5] = 0x03; /* Identifier type : NAA */ pInquiry[6] = 0x00; /* Reserved */ pInquiry[7] = 0x08; /* Identifier length */ /* Bit 4-7 NAA field, bit 0-3 MSB of IEEE Company ID */ pInquiry[8] = (bit8)((pSATAIdData->namingAuthority) >> 8); pInquiry[9] = (bit8)((pSATAIdData->namingAuthority) & 0xFF); /* IEEE Company ID */ pInquiry[10] = (bit8)((pSATAIdData->namingAuthority1) >> 8); /* IEEE Company ID */ /* Bit 4-7 LSB of IEEE Company ID, bit 0-3 MSB of Vendor Specific ID */ pInquiry[11] = (bit8)((pSATAIdData->namingAuthority1) & 0xFF); pInquiry[12] = (bit8)((pSATAIdData->uniqueID_bit16_31) >> 8); /* Vendor Specific ID */ pInquiry[13] = (bit8)((pSATAIdData->uniqueID_bit16_31) & 0xFF); /* Vendor Specific ID */ pInquiry[14] = (bit8)((pSATAIdData->uniqueID_bit0_15) >> 8); /* Vendor Specific ID */ pInquiry[15] = 
(bit8)((pSATAIdData->uniqueID_bit0_15) & 0xFF); /* Vendor Specific ID */ } else { /* Fill in SAT Rev8 Table86 */ /* * Logical unit name derived from the model number and serial number. */ pInquiry[3] = 72; /* 75 - 3; page length */ /* * Identifier descriptor */ pInquiry[4] = 0x02; /* Code set: ASCII codes */ pInquiry[5] = 0x01; /* Identifier type : T10 vendor ID based */ pInquiry[6] = 0x00; /* Reserved */ pInquiry[7] = 0x44; /* 0x44, 68 Identifier length */ /* Byte 8 to 15 is the vendor id string 'ATA '. */ osti_strncpy((char *)&pInquiry[8], AG_SAT_VENDOR_ID_STRING, 8); /* * Byte 16 to 75 is vendor specific id */ pInquiry[16] = (bit8)((pSimpleData->word[27]) >> 8); pInquiry[17] = (bit8)((pSimpleData->word[27]) & 0x00ff); pInquiry[18] = (bit8)((pSimpleData->word[28]) >> 8); pInquiry[19] = (bit8)((pSimpleData->word[28]) & 0x00ff); pInquiry[20] = (bit8)((pSimpleData->word[29]) >> 8); pInquiry[21] = (bit8)((pSimpleData->word[29]) & 0x00ff); pInquiry[22] = (bit8)((pSimpleData->word[30]) >> 8); pInquiry[23] = (bit8)((pSimpleData->word[30]) & 0x00ff); pInquiry[24] = (bit8)((pSimpleData->word[31]) >> 8); pInquiry[25] = (bit8)((pSimpleData->word[31]) & 0x00ff); pInquiry[26] = (bit8)((pSimpleData->word[32]) >> 8); pInquiry[27] = (bit8)((pSimpleData->word[32]) & 0x00ff); pInquiry[28] = (bit8)((pSimpleData->word[33]) >> 8); pInquiry[29] = (bit8)((pSimpleData->word[33]) & 0x00ff); pInquiry[30] = (bit8)((pSimpleData->word[34]) >> 8); pInquiry[31] = (bit8)((pSimpleData->word[34]) & 0x00ff); pInquiry[32] = (bit8)((pSimpleData->word[35]) >> 8); pInquiry[33] = (bit8)((pSimpleData->word[35]) & 0x00ff); pInquiry[34] = (bit8)((pSimpleData->word[36]) >> 8); pInquiry[35] = (bit8)((pSimpleData->word[36]) & 0x00ff); pInquiry[36] = (bit8)((pSimpleData->word[37]) >> 8); pInquiry[37] = (bit8)((pSimpleData->word[37]) & 0x00ff); pInquiry[38] = (bit8)((pSimpleData->word[38]) >> 8); pInquiry[39] = (bit8)((pSimpleData->word[38]) & 0x00ff); pInquiry[40] = (bit8)((pSimpleData->word[39]) >> 8); 
pInquiry[41] = (bit8)((pSimpleData->word[39]) & 0x00ff); pInquiry[42] = (bit8)((pSimpleData->word[40]) >> 8); pInquiry[43] = (bit8)((pSimpleData->word[40]) & 0x00ff); pInquiry[44] = (bit8)((pSimpleData->word[41]) >> 8); pInquiry[45] = (bit8)((pSimpleData->word[41]) & 0x00ff); pInquiry[46] = (bit8)((pSimpleData->word[42]) >> 8); pInquiry[47] = (bit8)((pSimpleData->word[42]) & 0x00ff); pInquiry[48] = (bit8)((pSimpleData->word[43]) >> 8); pInquiry[49] = (bit8)((pSimpleData->word[43]) & 0x00ff); pInquiry[50] = (bit8)((pSimpleData->word[44]) >> 8); pInquiry[51] = (bit8)((pSimpleData->word[44]) & 0x00ff); pInquiry[52] = (bit8)((pSimpleData->word[45]) >> 8); pInquiry[53] = (bit8)((pSimpleData->word[45]) & 0x00ff); pInquiry[54] = (bit8)((pSimpleData->word[46]) >> 8); pInquiry[55] = (bit8)((pSimpleData->word[46]) & 0x00ff); pInquiry[56] = (bit8)((pSimpleData->word[10]) >> 8); pInquiry[57] = (bit8)((pSimpleData->word[10]) & 0x00ff); pInquiry[58] = (bit8)((pSimpleData->word[11]) >> 8); pInquiry[59] = (bit8)((pSimpleData->word[11]) & 0x00ff); pInquiry[60] = (bit8)((pSimpleData->word[12]) >> 8); pInquiry[61] = (bit8)((pSimpleData->word[12]) & 0x00ff); pInquiry[62] = (bit8)((pSimpleData->word[13]) >> 8); pInquiry[63] = (bit8)((pSimpleData->word[13]) & 0x00ff); pInquiry[64] = (bit8)((pSimpleData->word[14]) >> 8); pInquiry[65] = (bit8)((pSimpleData->word[14]) & 0x00ff); pInquiry[66] = (bit8)((pSimpleData->word[15]) >> 8); pInquiry[67] = (bit8)((pSimpleData->word[15]) & 0x00ff); pInquiry[68] = (bit8)((pSimpleData->word[16]) >> 8); pInquiry[69] = (bit8)((pSimpleData->word[16]) & 0x00ff); pInquiry[70] = (bit8)((pSimpleData->word[17]) >> 8); pInquiry[71] = (bit8)((pSimpleData->word[17]) & 0x00ff); pInquiry[72] = (bit8)((pSimpleData->word[18]) >> 8); pInquiry[73] = (bit8)((pSimpleData->word[18]) & 0x00ff); pInquiry[74] = (bit8)((pSimpleData->word[19]) >> 8); pInquiry[75] = (bit8)((pSimpleData->word[19]) & 0x00ff); } } 
/*****************************************************************************/ /*! \brief SAT implementation for SCSI INQUIRY page 89. * * SAT implementation for SCSI INQUIRY page 89. * * \param pInquiry: Pointer to Inquiry Data buffer. * \param pSATAIdData: Pointer to ATA IDENTIFY DEVICE data. * \param pSatDevData Pointer to internal device data structure * * \return None. */ /*****************************************************************************/ GLOBAL void satInquiryPage89( bit8 *pInquiry, agsaSATAIdentifyData_t *pSATAIdData, satDeviceData_t *pSatDevData) { /* SAT revision 8, 10.3.5, p 83 */ satSimpleSATAIdentifyData_t *pSimpleData; /* * When translating the fields, in some cases using the simple form of SATA * Identify Device Data is easier. So we define it here. * Both pSimpleData and pSATAIdData points to the same data. */ pSimpleData = ( satSimpleSATAIdentifyData_t *)pSATAIdData; TI_DBG5(("satInquiryPage89: start\n")); pInquiry[0] = 0x00; /* Peripheral Qualifier and Peripheral Device Type */ pInquiry[1] = 0x89; /* page code */ /* Page length 0x238 */ pInquiry[2] = 0x02; pInquiry[3] = 0x38; pInquiry[4] = 0x0; /* reserved */ pInquiry[5] = 0x0; /* reserved */ pInquiry[6] = 0x0; /* reserved */ pInquiry[7] = 0x0; /* reserved */ /* SAT Vendor Identification */ osti_strncpy((char*)&pInquiry[8], "PMC-SIERRA", 8); /* 8 bytes */ /* SAT Product Idetification */ osti_strncpy((char*)&pInquiry[16], "Tachyon-SPC ", 16); /* 16 bytes */ /* SAT Product Revision Level */ osti_strncpy((char*)&pInquiry[32], "01", 4); /* 4 bytes */ /* Signature, SAT revision8, Table88, p85 */ pInquiry[36] = 0x34; /* FIS type */ if (pSatDevData->satDeviceType == SATA_ATA_DEVICE) { /* interrupt assume to be 0 */ pInquiry[37] = (bit8)((pSatDevData->satPMField) >> (4 * 7)); /* first four bits of PM field */ } else { /* interrupt assume to be 1 */ pInquiry[37] = (bit8)(0x40 + (bit8)(((pSatDevData->satPMField) >> (4 * 7)))); /* first four bits of PM field */ } pInquiry[38] = 0; pInquiry[39] = 
0; if (pSatDevData->satDeviceType == SATA_ATA_DEVICE) { pInquiry[40] = 0x01; /* LBA Low */ pInquiry[41] = 0x00; /* LBA Mid */ pInquiry[42] = 0x00; /* LBA High */ pInquiry[43] = 0x00; /* Device */ pInquiry[44] = 0x00; /* LBA Low Exp */ pInquiry[45] = 0x00; /* LBA Mid Exp */ pInquiry[46] = 0x00; /* LBA High Exp */ pInquiry[47] = 0x00; /* Reserved */ pInquiry[48] = 0x01; /* Sector Count */ pInquiry[49] = 0x00; /* Sector Count Exp */ } else { pInquiry[40] = 0x01; /* LBA Low */ pInquiry[41] = 0x00; /* LBA Mid */ pInquiry[42] = 0x00; /* LBA High */ pInquiry[43] = 0x00; /* Device */ pInquiry[44] = 0x00; /* LBA Low Exp */ pInquiry[45] = 0x00; /* LBA Mid Exp */ pInquiry[46] = 0x00; /* LBA High Exp */ pInquiry[47] = 0x00; /* Reserved */ pInquiry[48] = 0x01; /* Sector Count */ pInquiry[49] = 0x00; /* Sector Count Exp */ } /* Reserved */ pInquiry[50] = 0x00; pInquiry[51] = 0x00; pInquiry[52] = 0x00; pInquiry[53] = 0x00; pInquiry[54] = 0x00; pInquiry[55] = 0x00; /* Command Code */ if (pSatDevData->satDeviceType == SATA_ATA_DEVICE) { pInquiry[56] = 0xEC; /* IDENTIFY DEVICE */ } else { pInquiry[56] = 0xA1; /* IDENTIFY PACKET DEVICE */ } /* Reserved */ pInquiry[57] = 0x0; pInquiry[58] = 0x0; pInquiry[59] = 0x0; /* Identify Device */ osti_memcpy(&pInquiry[60], pSimpleData, sizeof(satSimpleSATAIdentifyData_t)); return; } /*****************************************************************************/ /*! \brief SAT implementation for SCSI INQUIRY page 0. * * SAT implementation for SCSI INQUIRY page 0. * * \param pInquiry: Pointer to Inquiry Data buffer. * \param pSATAIdData: Pointer to ATA IDENTIFY DEVICE data. * * \return None. 
*/ /*****************************************************************************/ GLOBAL void satInquiryPage80( bit8 *pInquiry, agsaSATAIdentifyData_t *pSATAIdData) { TI_DBG5(("satInquiryPage80: entry\n")); /* See SPC-4, 7.6.9, p 345 and SAT revision 8, 10.3.3, p 77 */ pInquiry[0] = 0x00; pInquiry[1] = 0x80; /* page code */ pInquiry[2] = 0x00; /* reserved */ pInquiry[3] = 0x14; /* page length */ /* supported vpd page list */ pInquiry[4] = pSATAIdData->serialNumber[1]; pInquiry[5] = pSATAIdData->serialNumber[0]; pInquiry[6] = pSATAIdData->serialNumber[3]; pInquiry[7] = pSATAIdData->serialNumber[2]; pInquiry[8] = pSATAIdData->serialNumber[5]; pInquiry[9] = pSATAIdData->serialNumber[4]; pInquiry[10] = pSATAIdData->serialNumber[7]; pInquiry[11] = pSATAIdData->serialNumber[6]; pInquiry[12] = pSATAIdData->serialNumber[9]; pInquiry[13] = pSATAIdData->serialNumber[8]; pInquiry[14] = pSATAIdData->serialNumber[11]; pInquiry[15] = pSATAIdData->serialNumber[10]; pInquiry[16] = pSATAIdData->serialNumber[13]; pInquiry[17] = pSATAIdData->serialNumber[12]; pInquiry[18] = pSATAIdData->serialNumber[15]; pInquiry[19] = pSATAIdData->serialNumber[14]; pInquiry[20] = pSATAIdData->serialNumber[17]; pInquiry[21] = pSATAIdData->serialNumber[16]; pInquiry[22] = pSATAIdData->serialNumber[19]; pInquiry[23] = pSATAIdData->serialNumber[18]; } /*****************************************************************************/ /*! \brief Send READ LOG EXT ATA PAGE 10h command to sata drive. * * Send READ LOG EXT ATA command PAGE 10h request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. * \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. 
* - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satSendReadLogExt( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; fis = satIOContext->pFis; TI_DBG1(("satSendReadLogExt: tiDeviceHandle=%p tiIORequest=%p\n", tiDeviceHandle, tiIORequest)); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ fis->h.command = SAT_READ_LOG_EXT; /* 0x2F */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0x10; /* Page number */ fis->d.lbaMid = 0; /* */ fis->d.lbaHigh = 0; /* */ fis->d.device = 0; /* DEV is ignored in SATA */ fis->d.lbaLowExp = 0; /* */ fis->d.lbaMidExp = 0; /* */ fis->d.lbaHighExp = 0; /* */ fis->d.featuresExp = 0; /* FIS reserve */ fis->d.sectorCount = 0x01; /* 1 sector counts*/ fis->d.sectorCountExp = 0x00; /* 1 sector counts */ fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satReadLogExtCB; /* * Prepare SGL and send FIS to LL layer. */ satIOContext->reqType = agRequestType; /* Save it */ status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG1(("satSendReadLogExt: end status %d\n", status)); return (status); } /*****************************************************************************/ /*! \brief SAT default ATA status and ATA error translation to SCSI. * * SSAT default ATA status and ATA error translation to SCSI. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. 
 * \param satIOContext:     Pointer to the SAT IO Context
 * \param pSense:           Pointer to scsiRspSense_t
 * \param ataStatus:        ATA status register
 * \param ataError:         ATA error register
 * \param interruptContext: Interrupt context
 *
 * \return None
 */
/*****************************************************************************/
GLOBAL void  osSatDefaultTranslation(
                   tiRoot_t             *tiRoot,
                   tiIORequest_t        *tiIORequest,
                   satIOContext_t       *satIOContext,
                   scsiRspSense_t       *pSense,
                   bit8                 ataStatus,
                   bit8                 ataError,
                   bit32                interruptContext )
{
  /*
   * Check for device fault case
   */
  if ( ataStatus & DF_ATA_STATUS_MASK )
  {
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_HARDWARE_ERROR,
                        0,
                        SCSI_SNSCODE_INTERNAL_TARGET_FAILURE,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              interruptContext );
    return;
  }

  /*
   * If status error bit it set, need to check the error register.
   * The branch order below fixes the priority when several error bits
   * are set at once.
   */
  if ( ataStatus & ERR_ATA_STATUS_MASK )
  {
    if ( ataError & NM_ATA_ERROR_MASK )
    {
      /* No Media -> NOT READY / MEDIUM NOT PRESENT */
      TI_DBG1(("osSatDefaultTranslation: NM_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_NOT_READY,
                          0,
                          SCSI_SNSCODE_MEDIUM_NOT_PRESENT,
                          satIOContext);
    }
    else if (ataError & UNC_ATA_ERROR_MASK)
    {
      /* Uncorrectable data -> MEDIUM ERROR / UNRECOVERED READ */
      TI_DBG1(("osSatDefaultTranslation: UNC_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_MEDIUM_ERROR,
                          0,
                          SCSI_SNSCODE_UNRECOVERED_READ_ERROR,
                          satIOContext);
    }
    else if (ataError & IDNF_ATA_ERROR_MASK)
    {
      /* ID Not Found -> MEDIUM ERROR / RECORD NOT FOUND */
      TI_DBG1(("osSatDefaultTranslation: IDNF_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_MEDIUM_ERROR,
                          0,
                          SCSI_SNSCODE_RECORD_NOT_FOUND,
                          satIOContext);
    }
    else if (ataError & MC_ATA_ERROR_MASK)
    {
      /* Media Changed -> UNIT ATTENTION / NOT READY TO READY CHANGE */
      TI_DBG1(("osSatDefaultTranslation: MC_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_UNIT_ATTENTION,
                          0,
                          SCSI_SNSCODE_NOT_READY_TO_READY_CHANGE,
                          satIOContext);
    }
    else if (ataError & MCR_ATA_ERROR_MASK)
    {
      /* Media Change Request -> UNIT ATTENTION / MEDIUM REMOVAL REQUEST */
      TI_DBG1(("osSatDefaultTranslation: MCR_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_UNIT_ATTENTION,
                          0,
                          SCSI_SNSCODE_OPERATOR_MEDIUM_REMOVAL_REQUEST,
                          satIOContext);
    }
    else if (ataError & ICRC_ATA_ERROR_MASK)
    {
      /* Interface CRC -> ABORTED COMMAND / IU CRC ERROR */
      TI_DBG1(("osSatDefaultTranslation: ICRC_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ABORTED_COMMAND,
                          0,
                          SCSI_SNSCODE_INFORMATION_UNIT_CRC_ERROR,
                          satIOContext);
    }
    else if (ataError & ABRT_ATA_ERROR_MASK)
    {
      /* Command aborted by device -> ABORTED COMMAND */
      TI_DBG1(("osSatDefaultTranslation: ABRT_ATA_ERROR ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_ABORTED_COMMAND,
                          0,
                          SCSI_SNSCODE_NO_ADDITIONAL_INFO,
                          satIOContext);
    }
    else
    {
      /* No recognized error bit: report a generic hardware error. */
      TI_DBG1(("osSatDefaultTranslation: **** UNEXPECTED ATA_ERROR **** ataError= 0x%x, tiIORequest=%p\n",
               ataError, tiIORequest));
      satSetSensePayload( pSense,
                          SCSI_SNSKEY_HARDWARE_ERROR,
                          0,
                          SCSI_SNSCODE_INTERNAL_TARGET_FAILURE,
                          satIOContext);
    }

    /* Send the completion response now */
    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              interruptContext );
    return;
  }
  else /* (ataStatus & ERR_ATA_STATUS_MASK ) is false */
  {
    /* This case should never happen */
    TI_DBG1(("osSatDefaultTranslation: *** UNEXPECTED ATA status 0x%x *** tiIORequest=%p\n",
             ataStatus, tiIORequest));
    satSetSensePayload( pSense,
                        SCSI_SNSKEY_HARDWARE_ERROR,
                        0,
                        SCSI_SNSCODE_INTERNAL_TARGET_FAILURE,
                        satIOContext);

    ostiInitiatorIOCompleted( tiRoot,
                              tiIORequest,
                              tiIOSuccess,
                              SCSI_STAT_CHECK_CONDITION,
                              satIOContext->pTiSenseData,
                              interruptContext );
    return;
  }
}

/*****************************************************************************/
/*! \brief Allocate resource for SAT intervally generated I/O.
 *
 *  Allocate resource for SAT intervally generated I/O.
 *
 *  \param   tiRoot:      Pointer to TISA driver/port instance.
 *  \param   satDevData:  Pointer to SAT specific device data.
 *  \param   allocLength: Length in byte of the DMA mem to allocate, upto
 *                        one page size (passed as dmaAllocLength below).
 *  \param   satIntIo:    Pointer (output) to context for SAT internally
 *                        generated I/O that is allocated by this routine.
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     Success.
 *    - \e tiError:       Failed allocating resource.
 */
/*****************************************************************************/
GLOBAL satInternalIo_t *  satAllocIntIoResource(
                    tiRoot_t              *tiRoot,
                    tiIORequest_t         *tiIORequest,
                    satDeviceData_t       *satDevData,
                    bit32                 dmaAllocLength,
                    satInternalIo_t       *satIntIo)
{
  tdList_t          *tdList = agNULL;
  bit32             memAllocStatus;

  TI_DBG1(("satAllocIntIoResource: start\n"));
  TI_DBG6(("satAllocIntIoResource: satIntIo %p\n", satIntIo));
  if (satDevData == agNULL)
  {
    TI_DBG1(("satAllocIntIoResource: ***** ASSERT satDevData is null\n"));
    return agNULL;
  }

  /* Take an internal-I/O context from the per-device free list; all list
   * manipulation happens under TD_SATA_LOCK. */
  tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK);
  if (!TDLIST_EMPTY(&(satDevData->satFreeIntIoLinkList)))
  {
    TDLIST_DEQUEUE_FROM_HEAD(&tdList, &(satDevData->satFreeIntIoLinkList));
  }
  else
  {
    tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK);
    TI_DBG1(("satAllocIntIoResource() no more internal free link.\n"));
    return agNULL;
  }

  if (tdList == agNULL)
  {
    tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK);
    TI_DBG1(("satAllocIntIoResource() FAIL to alloc satIntIo.\n"));
    return agNULL;
  }

  satIntIo = TDLIST_OBJECT_BASE( satInternalIo_t, satIntIoLink, tdList);
  TI_DBG6(("satAllocIntIoResource: satDevData %p satIntIo id %d\n",
           satDevData, satIntIo->id));

  /* Put in active list */
  /* NOTE(review): TDLIST_DEQUEUE_THIS after TDLIST_DEQUEUE_FROM_HEAD looks
   * redundant -- presumably DEQUEUE_FROM_HEAD only yields the entry; confirm
   * against the tdList macro definitions. */
  TDLIST_DEQUEUE_THIS (&(satIntIo->satIntIoLink));
  TDLIST_ENQUEUE_AT_TAIL (&(satIntIo->satIntIoLink), &(satDevData->satActiveIntIoLinkList));
  tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK);

#ifdef REMOVED
  /* Put in active list */
  tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK);
  TDLIST_DEQUEUE_THIS (tdList);
  TDLIST_ENQUEUE_AT_TAIL (tdList, &(satDevData->satActiveIntIoLinkList));
  tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK);

  satIntIo = TDLIST_OBJECT_BASE( satInternalIo_t, satIntIoLink, tdList);
  TI_DBG6(("satAllocIntIoResource: satDevData %p satIntIo id %d\n",
           satDevData, satIntIo->id));
#endif

  /* See the satInternalIo_t definition for the fields initialized below:
   * satIntIoLink, satIntTiIORequest, satIntRequestBody, satIntTiScsiXchg,
   * satIntDmaMem, satIntReqBodyMem, satIntFlag. */

  /*
   * Allocate mem for Request Body
   */
  satIntIo->satIntReqBodyMem.totalLength = sizeof(tdIORequestBody_t);

  memAllocStatus = ostiAllocMemory( tiRoot,
                                    &satIntIo->satIntReqBodyMem.osHandle,
                                    (void **)&satIntIo->satIntRequestBody,
                                    &satIntIo->satIntReqBodyMem.physAddrUpper,
                                    &satIntIo->satIntReqBodyMem.physAddrLower,
                                    8,
                                    satIntIo->satIntReqBodyMem.totalLength,
                                    agTRUE );

  if (memAllocStatus != tiSuccess)
  {
    TI_DBG1(("satAllocIntIoResource() FAIL to alloc mem for Req Body.\n"));
    /*
     * Return satIntIo to the free list
     */
    tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK);
    TDLIST_DEQUEUE_THIS (&satIntIo->satIntIoLink);
    TDLIST_ENQUEUE_AT_HEAD(&satIntIo->satIntIoLink, &satDevData->satFreeIntIoLinkList);
    tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK);

    return agNULL;
  }

  /*
   * Allocate DMA memory if required
   */
  if (dmaAllocLength != 0)
  {
    satIntIo->satIntDmaMem.totalLength = dmaAllocLength;

    memAllocStatus = ostiAllocMemory( tiRoot,
                                      &satIntIo->satIntDmaMem.osHandle,
                                      (void **)&satIntIo->satIntDmaMem.virtPtr,
                                      &satIntIo->satIntDmaMem.physAddrUpper,
                                      &satIntIo->satIntDmaMem.physAddrLower,
                                      8,
                                      satIntIo->satIntDmaMem.totalLength,
                                      agFALSE);
    TI_DBG6(("satAllocIntIoResource: len %d \n", satIntIo->satIntDmaMem.totalLength));
    TI_DBG6(("satAllocIntIoResource: pointer %p \n", satIntIo->satIntDmaMem.osHandle));

    if (memAllocStatus != tiSuccess)
    {
      TI_DBG1(("satAllocIntIoResource() FAIL to alloc mem for DMA mem.\n"));
      /*
       * Return satIntIo to the free list
       */
      tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK);
      TDLIST_DEQUEUE_THIS (&satIntIo->satIntIoLink);
      TDLIST_ENQUEUE_AT_HEAD(&satIntIo->satIntIoLink, &satDevData->satFreeIntIoLinkList);
      tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK);

      /*
       * Free mem allocated for Req body
       */
      ostiFreeMemory( tiRoot,
                      satIntIo->satIntReqBodyMem.osHandle,
                      satIntIo->satIntReqBodyMem.totalLength);

      return agNULL;
    }
  }

  /*
   * Initialize satIntTiIORequest field
   */
  satIntIo->satIntTiIORequest.osData = agNULL;  /* Not used for internal SAT I/O */
  satIntIo->satIntTiIORequest.tdData = satIntIo->satIntRequestBody;

  /*
   * saves the original tiIOrequest
   */
  satIntIo->satOrgTiIORequest = tiIORequest;

  /*
   * Initialize satIntTiScsiXchg. Since the internal SAT request is NOT
   * originated from SCSI request, only the following fields are initialized:
   *  - sglVirtualAddr if DMA transfer is involved
   *  - agSgl1 if DMA transfer is involved
   *  - expDataLength in scsiCmnd since this field is read by sataLLIOStart()
   */
  if (dmaAllocLength != 0)
  {
    satIntIo->satIntTiScsiXchg.sglVirtualAddr = satIntIo->satIntDmaMem.virtPtr;

    OSSA_WRITE_LE_32(agNULL, &satIntIo->satIntTiScsiXchg.agSgl1.len, 0,
                     satIntIo->satIntDmaMem.totalLength);
    satIntIo->satIntTiScsiXchg.agSgl1.lower = satIntIo->satIntDmaMem.physAddrLower;
    satIntIo->satIntTiScsiXchg.agSgl1.upper = satIntIo->satIntDmaMem.physAddrUpper;
    satIntIo->satIntTiScsiXchg.agSgl1.type  = tiSgl;

    satIntIo->satIntTiScsiXchg.scsiCmnd.expDataLength = satIntIo->satIntDmaMem.totalLength;
  }
  else
  {
    satIntIo->satIntTiScsiXchg.sglVirtualAddr = agNULL;

    satIntIo->satIntTiScsiXchg.agSgl1.len   = 0;
    satIntIo->satIntTiScsiXchg.agSgl1.lower = 0;
    satIntIo->satIntTiScsiXchg.agSgl1.upper = 0;
    satIntIo->satIntTiScsiXchg.agSgl1.type  = tiSgl;

    satIntIo->satIntTiScsiXchg.scsiCmnd.expDataLength = 0;
  }

  TI_DBG5(("satAllocIntIoResource: satIntIo->satIntTiScsiXchg.agSgl1.len %d\n",
           satIntIo->satIntTiScsiXchg.agSgl1.len));
  TI_DBG5(("satAllocIntIoResource: satIntIo->satIntTiScsiXchg.agSgl1.upper %d\n",
           satIntIo->satIntTiScsiXchg.agSgl1.upper));
  TI_DBG5(("satAllocIntIoResource: satIntIo->satIntTiScsiXchg.agSgl1.lower %d\n",
           satIntIo->satIntTiScsiXchg.agSgl1.lower));
  TI_DBG5(("satAllocIntIoResource: satIntIo->satIntTiScsiXchg.agSgl1.type %d\n",
           satIntIo->satIntTiScsiXchg.agSgl1.type));
  TI_DBG5(("satAllocIntIoResource: return satIntIo %p\n", satIntIo));

  return  satIntIo;
}

/*****************************************************************************/
/*! \brief Free resource for SAT intervally generated I/O.
 *
 *  Free resource for SAT intervally generated I/O that was previously
 *  allocated in satAllocIntIoResource().
 *
 *  \param   tiRoot:      Pointer to TISA driver/port instance.
* \param satDevData: Pointer to SAT specific device data. * \param satIntIo: Pointer to context for SAT internal I/O that was * previously allocated in satAllocIntIoResource(). * * \return None */ /*****************************************************************************/ GLOBAL void satFreeIntIoResource( tiRoot_t *tiRoot, satDeviceData_t *satDevData, satInternalIo_t *satIntIo) { TI_DBG6(("satFreeIntIoResource: start\n")); if (satIntIo == agNULL) { TI_DBG6(("satFreeIntIoResource: allowed call\n")); return; } /* sets the original tiIOrequest to agNULL for internally generated ATA cmnd */ satIntIo->satOrgTiIORequest = agNULL; /* * Free DMA memory if previosly alocated */ if (satIntIo->satIntTiScsiXchg.scsiCmnd.expDataLength != 0) { TI_DBG1(("satFreeIntIoResource: DMA len %d\n", satIntIo->satIntDmaMem.totalLength)); TI_DBG6(("satFreeIntIoResource: pointer %p\n", satIntIo->satIntDmaMem.osHandle)); ostiFreeMemory( tiRoot, satIntIo->satIntDmaMem.osHandle, satIntIo->satIntDmaMem.totalLength); satIntIo->satIntTiScsiXchg.scsiCmnd.expDataLength = 0; } if (satIntIo->satIntReqBodyMem.totalLength != 0) { TI_DBG1(("satFreeIntIoResource: req body len %d\n", satIntIo->satIntReqBodyMem.totalLength)); /* * Free mem allocated for Req body */ ostiFreeMemory( tiRoot, satIntIo->satIntReqBodyMem.osHandle, satIntIo->satIntReqBodyMem.totalLength); satIntIo->satIntReqBodyMem.totalLength = 0; } TI_DBG6(("satFreeIntIoResource: satDevData %p satIntIo id %d\n", satDevData, satIntIo->id)); /* * Return satIntIo to the free list */ tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK); TDLIST_DEQUEUE_THIS (&(satIntIo->satIntIoLink)); TDLIST_ENQUEUE_AT_TAIL (&(satIntIo->satIntIoLink), &(satDevData->satFreeIntIoLinkList)); tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK); } /*****************************************************************************/ /*! \brief SAT implementation for SCSI INQUIRY. * * SAT implementation for SCSI INQUIRY. 
 *  This function sends ATA Identify Device data command for SCSI INQUIRY
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiIORequest:     Pointer to TISA I/O request context for this I/O.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  tiScsiRequest:   Pointer to TISA SCSI I/O request and SGL list.
 *  \param  satIOContext_t:  Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32  satSendIDDev(
                           tiRoot_t                  *tiRoot,
                           tiIORequest_t             *tiIORequest,
                           tiDeviceHandle_t          *tiDeviceHandle,
                           tiScsiInitiatorRequest_t  *tiScsiRequest,
                           satIOContext_t            *satIOContext)
{
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  agsaFisRegHostToDevice_t  *fis;
#ifdef  TD_DEBUG_ENABLE
  satInternalIo_t           *satIntIoContext;
  tdsaDeviceData_t          *oneDeviceData;
  tdIORequestBody_t         *tdIORequestBody;
#endif

  pSatDevData = satIOContext->pSatDevData;
  fis         = satIOContext->pFis;
  TI_DBG5(("satSendIDDev: start\n"));
#ifdef  TD_DEBUG_ENABLE
  oneDeviceData = (tdsaDeviceData_t *)tiDeviceHandle->tdData;
#endif
  /* NOTE(review): oneDeviceData and tdIORequestBody only exist under
   * TD_DEBUG_ENABLE; the TI_DBG5 uses below presumably compile out with
   * the debug macros — confirm TI_DBG5 expands to nothing otherwise. */
  TI_DBG5(("satSendIDDev: did %d\n", oneDeviceData->id));
#ifdef  TD_DEBUG_ENABLE
  satIntIoContext = satIOContext->satIntIoContext;
  tdIORequestBody = satIntIoContext->satIntRequestBody;
#endif
  TI_DBG5(("satSendIDDev: satIOContext %p tdIORequestBody %p\n", satIOContext, tdIORequestBody));

  /* Build a register host-to-device FIS carrying IDENTIFY (PACKET) DEVICE. */
  fis->h.fisType        = 0x27;                   /* Reg host to device */
  fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
  if (pSatDevData->satDeviceType == SATA_ATAPI_DEVICE)
      fis->h.command    = SAT_IDENTIFY_PACKET_DEVICE;  /* 0x40 */
  else
      fis->h.command    = SAT_IDENTIFY_DEVICE;    /* 0xEC */
  fis->h.features       = 0;                      /* FIS reserve */
  fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
  fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
  fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
  fis->d.device         = 0;                      /* FIS LBA mode  */
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0;                      /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satInquiryCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

#ifdef TD_INTERNAL_DEBUG
  tdhexdump("satSendIDDev", (bit8 *)satIOContext->pFis, sizeof(agsaFisRegHostToDevice_t));
#ifdef  TD_DEBUG_ENABLE
  tdhexdump("satSendIDDev LL", (bit8 *)&(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev), sizeof(agsaFisRegHostToDevice_t));
#endif
#endif

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  TI_DBG6(("satSendIDDev: end status %d\n", status));
  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for SCSI INQUIRY.
 *
 *  SAT implementation for SCSI INQUIRY.
 *  This function prepares TD layer internal resource to send ATA
 *  Identify Device data command for SCSI INQUIRY
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiIORequest:     Pointer to TISA I/O request context for this I/O.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  tiScsiRequest:   Pointer to TISA SCSI I/O request and SGL list.
 *  \param  satIOContext_t:  Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
/* prerequisite: tdsaDeviceData and agdevhandle must exist; in other words,
   LL discovered the device already */
/* convert OS generated IO to TD generated IO due to difference in sgl */
GLOBAL bit32  satStartIDDev(
                        tiRoot_t                  *tiRoot,
                        tiIORequest_t             *tiIORequest,
                        tiDeviceHandle_t          *tiDeviceHandle,
                        tiScsiInitiatorRequest_t  *tiScsiRequest,
                        satIOContext_t            *satIOContext
                        )
{
  satInternalIo_t           *satIntIo = agNULL;
  satDeviceData_t           *satDevData = agNULL;
  tdIORequestBody_t         *tdIORequestBody;
  satIOContext_t            *satNewIOContext;
  bit32                     status;

  TI_DBG6(("satStartIDDev: start\n"));

  satDevData = satIOContext->pSatDevData;

  TI_DBG6(("satStartIDDev: before alloc\n"));

  /* allocate identify device command */
  satIntIo = satAllocIntIoResource( tiRoot,
                                    tiIORequest,
                                    satDevData,
                                    sizeof(agsaSATAIdentifyData_t), /* 512; size of identify device data */
                                    satIntIo);

  TI_DBG6(("satStartIDDev: before after\n"));

  if (satIntIo == agNULL)
  {
    TI_DBG1(("satStartIDDev: can't alloacate\n"));
#if 0
    ostiInitiatorIOCompleted (
                              tiRoot,
                              tiIORequest,
                              tiIOFailed,
                              tiDetailOtherError,
                              agNULL,
                              satIOContext->interruptContext
                              );
#endif
    return tiError;
  }

  /* fill in fields */
  /* real ttttttthe one worked and the same; 5/21/07/ */
  satIntIo->satOrgTiIORequest = tiIORequest; /* changed */
  tdIORequestBody = satIntIo->satIntRequestBody;
  satNewIOContext = &(tdIORequestBody->transport.SATA.satIOContext);

  /* Wire the new (internal) IO context to the freshly allocated request
   * body; sense/FIS buffers live inside the request body itself. */
  satNewIOContext->pSatDevData      = satDevData;
  satNewIOContext->pFis             = &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
  satNewIOContext->pScsiCmnd        = &(satIntIo->satIntTiScsiXchg.scsiCmnd);
  satNewIOContext->pSense           = &(tdIORequestBody->transport.SATA.sensePayload);
  satNewIOContext->pTiSenseData     = &(tdIORequestBody->transport.SATA.tiSenseData);
  satNewIOContext->tiRequestBody    = satIntIo->satIntRequestBody; /* key fix */
  satNewIOContext->interruptContext = tiInterruptContext;
  satNewIOContext->satIntIoContext  = satIntIo;
  satNewIOContext->ptiDeviceHandle  = agNULL;
  satNewIOContext->satOrgIOContext  = satIOContext; /* changed */
  /* this is valid only for TD layer generated (not triggered by OS at all) IO */
  satNewIOContext->tiScsiXchg = &(satIntIo->satIntTiScsiXchg);

  TI_DBG6(("satStartIDDev: OS satIOContext %p \n", satIOContext));
  TI_DBG6(("satStartIDDev: TD satNewIOContext %p \n", satNewIOContext));
  TI_DBG6(("satStartIDDev: OS tiScsiXchg %p \n", satIOContext->tiScsiXchg));
  TI_DBG6(("satStartIDDev: TD tiScsiXchg %p \n", satNewIOContext->tiScsiXchg));

  TI_DBG1(("satStartIDDev: satNewIOContext %p tdIORequestBody %p\n", satNewIOContext, tdIORequestBody));

  status = satSendIDDev( tiRoot,
                         &satIntIo->satIntTiIORequest, /* New tiIORequest */
                         tiDeviceHandle,
                         satNewIOContext->tiScsiXchg, /* New tiScsiInitiatorRequest_t *tiScsiRequest, */
                         satNewIOContext);

  if (status != tiSuccess)
  {
    TI_DBG1(("satStartIDDev: failed in sending\n"));

    /* On failure the internal context is recycled here; the caller only
     * sees tiError. */
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
#if 0
    ostiInitiatorIOCompleted (
                              tiRoot,
                              tiIORequest,
                              tiIOFailed,
                              tiDetailOtherError,
                              agNULL,
                              satIOContext->interruptContext
                              );
#endif
    return tiError;
  }

  TI_DBG6(("satStartIDDev: end\n"));

  return status;
}

/*****************************************************************************/
/*! \brief satComputeCDB10LBA.
 *
 *  This function computes LBA of CDB10.
 *
 *  \param satIOContext_t: Pointer to the SAT IO Context
 *
 *  \return
 *    - \e LBA
 */
/*****************************************************************************/
bit32 satComputeCDB10LBA(satIOContext_t            *satIOContext)
{
  tiIniScsiCmnd_t           *scsiCmnd;
  tiScsiInitiatorRequest_t  *tiScsiRequest;
  bit32                     lba = 0;

  TI_DBG5(("satComputeCDB10LBA: start\n"));
  tiScsiRequest = satIOContext->tiScsiXchg;
  scsiCmnd      = &(tiScsiRequest->scsiCmnd);

  /* CDB bytes 2..5 hold the big-endian 32-bit LBA. */
  lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2))
      + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5];

  return lba;
}

/*****************************************************************************/
/*! \brief satComputeCDB10TL.
* * This fuctions computes transfer length of CDB10. * * \param satIOContext_t: Pointer to the SAT IO Context * * \return * - \e TL */ /*****************************************************************************/ bit32 satComputeCDB10TL(satIOContext_t *satIOContext) { tiIniScsiCmnd_t *scsiCmnd; tiScsiInitiatorRequest_t *tiScsiRequest; bit32 tl = 0; TI_DBG5(("satComputeCDB10TL: start\n")); tiScsiRequest = satIOContext->tiScsiXchg; scsiCmnd = &(tiScsiRequest->scsiCmnd); tl = (scsiCmnd->cdb[7] << 8) + scsiCmnd->cdb[8]; return tl; } /*****************************************************************************/ /*! \brief satComputeCDB12LBA. * * This fuctions computes LBA of CDB12. * * \param satIOContext_t: Pointer to the SAT IO Context * * \return * - \e LBA */ /*****************************************************************************/ bit32 satComputeCDB12LBA(satIOContext_t *satIOContext) { tiIniScsiCmnd_t *scsiCmnd; tiScsiInitiatorRequest_t *tiScsiRequest; bit32 lba = 0; TI_DBG5(("satComputeCDB10LBA: start\n")); tiScsiRequest = satIOContext->tiScsiXchg; scsiCmnd = &(tiScsiRequest->scsiCmnd); lba = (scsiCmnd->cdb[2] << (8*3)) + (scsiCmnd->cdb[3] << (8*2)) + (scsiCmnd->cdb[4] << 8) + scsiCmnd->cdb[5]; return lba; } /*****************************************************************************/ /*! \brief satComputeCDB12TL. * * This fuctions computes transfer length of CDB12. 
* * \param satIOContext_t: Pointer to the SAT IO Context * * \return * - \e TL */ /*****************************************************************************/ bit32 satComputeCDB12TL(satIOContext_t *satIOContext) { tiIniScsiCmnd_t *scsiCmnd; tiScsiInitiatorRequest_t *tiScsiRequest; bit32 tl = 0; TI_DBG5(("satComputeCDB10TL: start\n")); tiScsiRequest = satIOContext->tiScsiXchg; scsiCmnd = &(tiScsiRequest->scsiCmnd); tl = (scsiCmnd->cdb[6] << (8*3)) + (scsiCmnd->cdb[7] << (8*2)) + (scsiCmnd->cdb[8] << 8) + scsiCmnd->cdb[9]; return tl; } /*****************************************************************************/ /*! \brief satComputeCDB16LBA. * * This fuctions computes LBA of CDB16. * * \param satIOContext_t: Pointer to the SAT IO Context * * \return * - \e LBA */ /*****************************************************************************/ /* CBD16 has bit64 LBA But it has to be less than (2^28 - 1) Therefore, use last four bytes to compute LBA is OK */ bit32 satComputeCDB16LBA(satIOContext_t *satIOContext) { tiIniScsiCmnd_t *scsiCmnd; tiScsiInitiatorRequest_t *tiScsiRequest; bit32 lba = 0; TI_DBG5(("satComputeCDB10LBA: start\n")); tiScsiRequest = satIOContext->tiScsiXchg; scsiCmnd = &(tiScsiRequest->scsiCmnd); lba = (scsiCmnd->cdb[6] << (8*3)) + (scsiCmnd->cdb[7] << (8*2)) + (scsiCmnd->cdb[8] << 8) + scsiCmnd->cdb[9]; return lba; } /*****************************************************************************/ /*! \brief satComputeCDB16TL. * * This fuctions computes transfer length of CDB16. 
* * \param satIOContext_t: Pointer to the SAT IO Context * * \return * - \e TL */ /*****************************************************************************/ bit32 satComputeCDB16TL(satIOContext_t *satIOContext) { tiIniScsiCmnd_t *scsiCmnd; tiScsiInitiatorRequest_t *tiScsiRequest; bit32 tl = 0; TI_DBG5(("satComputeCDB10TL: start\n")); tiScsiRequest = satIOContext->tiScsiXchg; scsiCmnd = &(tiScsiRequest->scsiCmnd); tl = (scsiCmnd->cdb[10] << (8*3)) + (scsiCmnd->cdb[11] << (8*2)) + (scsiCmnd->cdb[12] << 8) + scsiCmnd->cdb[13]; return tl; } /*****************************************************************************/ /*! \brief satComputeLoopNum. * * This fuctions computes the number of interation needed for a transfer * length with a specific number. * * \param a: a numerator * \param b: a denominator * * \return * - \e number of interation */ /*****************************************************************************/ /* (tl, denom) tl can be upto bit32 because CDB16 has bit32 tl Therefore, fine either (tl, 0xFF) or (tl, 0xFFFF) */ bit32 satComputeLoopNum(bit32 a, bit32 b) { bit32 quo = 0, rem = 0; bit32 LoopNum = 0; TI_DBG5(("satComputeLoopNum: start\n")); quo = a/b; if (quo == 0) { LoopNum = 1; } else { rem = a % b; if (rem == 0) { LoopNum = quo; } else { LoopNum = quo + 1; } } return LoopNum; } /*****************************************************************************/ /*! \brief satAddNComparebit64. 
* * * * * \param a: lba * \param b: tl * * \return * - \e TRUE if (lba + tl > SAT_TR_LBA_LIMIT) * - \e FALSE otherwise * \note: a and b must be in the same length */ /*****************************************************************************/ /* input: bit8 a[8], bit8 b[8] (lba, tl) must be in same length if (lba + tl > SAT_TR_LBA_LIMIT) then returns true else returns false (LBA,TL) */ bit32 satAddNComparebit64(bit8 *a, bit8 *b) { bit16 ans[8]; // 0 MSB, 8 LSB bit8 final_ans[9]; // 0 MSB, 9 LSB bit8 max[9]; int i; TI_DBG5(("satAddNComparebit64: start\n")); osti_memset(ans, 0, sizeof(ans)); osti_memset(final_ans, 0, sizeof(final_ans)); osti_memset(max, 0, sizeof(max)); max[0] = 0x1; //max = 0x1 0000 0000 0000 0000 // adding from LSB to MSB for(i=7;i>=0;i--) { ans[i] = (bit16)(a[i] + b[i]); if (i != 7) { ans[i] = (bit16)(ans[i] + ((ans[i+1] & 0xFF00) >> 8)); } } /* filling in the final answer */ final_ans[0] = (bit8)(((ans[0] & 0xFF00) >> 8)); final_ans[1] = (bit8)(ans[0] & 0xFF); for(i=2;i<=8;i++) { final_ans[i] = (bit8)(ans[i-1] & 0xFF); } //compare final_ans to max for(i=0;i<=8;i++) { if (final_ans[i] > max[i]) { TI_DBG5(("satAddNComparebit64: yes at %d\n", i)); return agTRUE; } else if (final_ans[i] < max[i]) { TI_DBG5(("satAddNComparebit64: no at %d\n", i)); return agFALSE; } else { continue; } } return agFALSE; } /*****************************************************************************/ /*! \brief satAddNComparebit32. 
* * * * * \param a: lba * \param b: tl * * \return * - \e TRUE if (lba + tl > SAT_TR_LBA_LIMIT) * - \e FALSE otherwise * \note: a and b must be in the same length */ /*****************************************************************************/ /* input: bit8 a[4], bit8 b[4] (lba, tl) must be in same length if (lba + tl > SAT_TR_LBA_LIMIT) then returns true else returns false (LBA,TL) */ bit32 satAddNComparebit32(bit8 *a, bit8 *b) { bit16 ans[4]; // 0 MSB, 4 LSB bit8 final_ans[5]; // 0 MSB, 5 LSB bit8 max[4]; int i; TI_DBG5(("satAddNComparebit32: start\n")); osti_memset(ans, 0, sizeof(ans)); osti_memset(final_ans, 0, sizeof(final_ans)); osti_memset(max, 0, sizeof(max)); max[0] = 0x10; // max =0x1000 0000 // adding from LSB to MSB for(i=3;i>=0;i--) { ans[i] = (bit16)(a[i] + b[i]); if (i != 3) { ans[i] = (bit16)(ans[i] + ((ans[i+1] & 0xFF00) >> 8)); } } /* filling in the final answer */ final_ans[0] = (bit8)(((ans[0] & 0xFF00) >> 8)); final_ans[1] = (bit8)(ans[0] & 0xFF); for(i=2;i<=4;i++) { final_ans[i] = (bit8)(ans[i-1] & 0xFF); } //compare final_ans to max if (final_ans[0] != 0) { TI_DBG5(("satAddNComparebit32: yes bigger and out of range\n")); return agTRUE; } for(i=1;i<=4;i++) { if (final_ans[i] > max[i-1]) { TI_DBG5(("satAddNComparebit32: yes at %d\n", i)); return agTRUE; } else if (final_ans[i] < max[i-1]) { TI_DBG5(("satAddNComparebit32: no at %d\n", i)); return agFALSE; } else { continue; } } - return agFALSE;; + return agFALSE; } /*****************************************************************************/ /*! \brief satCompareLBALimitbit. 
* * * * * \param lba: lba * * \return * - \e TRUE if (lba > SAT_TR_LBA_LIMIT - 1) * - \e FALSE otherwise * \note: a and b must be in the same length */ /*****************************************************************************/ /* lba */ /* input: bit8 lba[8] if (lba > SAT_TR_LBA_LIMIT - 1) then returns true else returns false (LBA,TL) */ bit32 satCompareLBALimitbit(bit8 *lba) { bit32 i; bit8 limit[8]; /* limit is 0xF FF FF = 2^28 - 1 */ limit[0] = 0x0; /* MSB */ limit[1] = 0x0; limit[2] = 0x0; limit[3] = 0x0; limit[4] = 0xF; limit[5] = 0xFF; limit[6] = 0xFF; limit[7] = 0xFF; /* LSB */ //compare lba to limit for(i=0;i<8;i++) { if (lba[i] > limit[i]) { TI_DBG5(("satCompareLBALimitbit64: yes at %d\n", i)); return agTRUE; } else if (lba[i] < limit[i]) { TI_DBG5(("satCompareLBALimitbit64: no at %d\n", i)); return agFALSE; } else { continue; } } return agFALSE; } /***************************************************************************** *! \brief * Purpose: bitwise set * * Parameters: * data - input output buffer * index - bit to set * * Return: * none * *****************************************************************************/ GLOBAL void satBitSet(bit8 *data, bit32 index) { data[index/8] |= (1 << (index%8)); } /***************************************************************************** *! \brief * Purpose: bitwise clear * * Parameters: * data - input output buffer * index - bit to clear * * Return: * none * *****************************************************************************/ GLOBAL void satBitClear(bit8 *data, bit32 index) { data[index/8] &= ~(1 << (index%8)); } /***************************************************************************** *! 
\brief * Purpose: bitwise test * * Parameters: * data - input output buffer * index - bit to test * * Return: * 0 - not set * 1 - set * *****************************************************************************/ GLOBAL agBOOLEAN satBitTest(bit8 *data, bit32 index) { return ( (BOOLEAN)((data[index/8] & (1 << (index%8)) ) ? 1: 0)); } /******************************************************************************/ /*! \brief allocate an available SATA tag * * allocate an available SATA tag * * \param tiRoot Pointer to TISA initiator driver/port instance. * \param pSatDevData * \param pTag * * \return -Success or fail- */ /*******************************************************************************/ GLOBAL bit32 satTagAlloc( tiRoot_t *tiRoot, satDeviceData_t *pSatDevData, bit8 *pTag ) { bit32 retCode = agFALSE; bit32 i; tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK); for ( i = 0; i < pSatDevData->satNCQMaxIO; i ++ ) { if ( 0 == satBitTest((bit8 *)&pSatDevData->freeSATAFDMATagBitmap, i) ) { satBitSet((bit8*)&pSatDevData->freeSATAFDMATagBitmap, i); *pTag = (bit8) i; retCode = agTRUE; break; } } tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK); return retCode; } /******************************************************************************/ /*! \brief release an SATA tag * * release an available SATA tag * * \param tiRoot Pointer to TISA initiator driver/port instance. * \param pSatDevData * \param Tag * * \return -the tag- */ /*******************************************************************************/ GLOBAL bit32 satTagRelease( tiRoot_t *tiRoot, satDeviceData_t *pSatDevData, bit8 tag ) { bit32 retCode = agFALSE; tdsaSingleThreadedEnter(tiRoot, TD_SATA_LOCK); if ( tag < pSatDevData->satNCQMaxIO ) { satBitClear( (bit8 *)&pSatDevData->freeSATAFDMATagBitmap, (bit32)tag); retCode = agTRUE; } tdsaSingleThreadedLeave(tiRoot, TD_SATA_LOCK); return retCode; } /***************************************************************************** *! 
\brief satSubTM
*
*   This routine is called to initiate a TM request to SATL.
*   This routine is independent of HW/LL API.
*
*  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
*  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
*  \param  task:            SAM-3 task management request.
*  \param  lun:             Pointer to LUN.
*  \param  taskTag:         Pointer to the associated task where the TM
*                           command is to be applied.
*  \param  currentTaskTag:  Pointer to tag/context for this TM request.
*  \param  NotifyOS         flag determines whether notify OS layer or not
*
*  \return:
*
*  \e tiSuccess:     I/O request successfully initiated.
*  \e tiBusy:        No resources available, try again later.
*  \e tiIONoDevice:  Invalid device handle.
*  \e tiError:       Other errors that prevent the I/O request to be started.
*
*  \note:
*  This function is triggered bottom up. Not yet in use.
*****************************************************************************/
/* called for bottom up */
osGLOBAL bit32 satSubTM(
                        tiRoot_t          *tiRoot,
                        tiDeviceHandle_t  *tiDeviceHandle,
                        bit32             task,
                        tiLUN_t           *lun,
                        tiIORequest_t     *taskTag,
                        tiIORequest_t     *currentTaskTag,
                        bit32             NotifyOS
                        )
{
  void              *osMemHandle;
  tdIORequestBody_t *TMtdIORequestBody;
  bit32             PhysUpper32;
  bit32             PhysLower32;
  bit32             memAllocStatus;
  agsaIORequest_t   *agIORequest = agNULL;

  TI_DBG6(("satSubTM: start\n"));

  /* allocation tdIORequestBody and pass it to satTM().
   * NOTE(review): ownership of this allocation (osMemHandle) appears to
   * transfer to the TM completion path; confirm it is freed there. */
  memAllocStatus = ostiAllocMemory(
                                   tiRoot,
                                   &osMemHandle,
                                   (void **)&TMtdIORequestBody,
                                   &PhysUpper32,
                                   &PhysLower32,
                                   8,
                                   sizeof(tdIORequestBody_t),
                                   agTRUE
                                   );

  if (memAllocStatus != tiSuccess)
  {
    TI_DBG1(("satSubTM: ostiAllocMemory failed...\n"));
    return tiError;
  }

  if (TMtdIORequestBody == agNULL)
  {
    TI_DBG1(("satSubTM: ostiAllocMemory returned NULL TMIORequestBody\n"));
    return tiError;
  }

  /* setup task management structure */
  TMtdIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
  TMtdIORequestBody->IOType.InitiatorTMIO.CurrentTaskTag = agNULL;
  TMtdIORequestBody->IOType.InitiatorTMIO.TaskTag = agNULL;

  /* initialize tiDevhandle */
  TMtdIORequestBody->tiDevHandle = tiDeviceHandle;

  /* initialize tiIORequest */
  TMtdIORequestBody->tiIORequest = agNULL;

  /* initialize agIORequest */
  agIORequest = &(TMtdIORequestBody->agIORequest);
  agIORequest->osData = (void *) TMtdIORequestBody;
  agIORequest->sdkData = agNULL; /* SA takes care of this */

  /* Fire the TM; return value of satTM() is not checked here. */
  satTM(tiRoot,
        tiDeviceHandle,
        task, /* TD_INTERNAL_TM_RESET */
        agNULL,
        agNULL,
        agNULL,
        TMtdIORequestBody,
        agFALSE);

  return tiSuccess;
}

/*****************************************************************************/
/*! \brief SAT implementation for satStartResetDevice.
 *
 *  SAT implementation for sending SRT and send FIS request to LL layer.
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiIORequest:     Pointer to TISA I/O request context for this I/O.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  tiScsiRequest:   Pointer to TISA SCSI I/O request and SGL list.
 *  \param  satIOContext_t:  Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 *  \note : triggered by OS layer or bottom up
 */
/*****************************************************************************/
/* OS triggered or bottom up */
GLOBAL bit32  satStartResetDevice(
                            tiRoot_t                  *tiRoot,
                            tiIORequest_t             *tiIORequest, /* currentTaskTag */
                            tiDeviceHandle_t          *tiDeviceHandle,
                            tiScsiInitiatorRequest_t  *tiScsiRequest, /* should be NULL */
                            satIOContext_t            *satIOContext
                            )
{
  satInternalIo_t           *satIntIo = agNULL;
  satDeviceData_t           *satDevData = agNULL;
  satIOContext_t            *satNewIOContext;
  bit32                     status;
  tiIORequest_t             *currentTaskTag = agNULL;

  TI_DBG1(("satStartResetDevice: start\n"));

  currentTaskTag = tiIORequest;

  satDevData = satIOContext->pSatDevData;

  TI_DBG6(("satStartResetDevice: before alloc\n"));

  /* allocate any fis for setting SRT bit in device control;
   * dmaAllocLength of 0 means no DMA buffer is needed for a reset */
  satIntIo = satAllocIntIoResource( tiRoot,
                                    tiIORequest,
                                    satDevData,
                                    0,
                                    satIntIo);

  TI_DBG6(("satStartResetDevice: before after\n"));

  if (satIntIo == agNULL)
  {
    TI_DBG1(("satStartResetDevice: can't alloacate\n"));
    /* Only OS-triggered requests report the TM failure upward. */
    if (satIOContext->NotifyOS)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          currentTaskTag );
    }
    return tiError;
  }

  satNewIOContext = satPrepareNewIO(satIntIo,
                                    tiIORequest,
                                    satDevData,
                                    agNULL,
                                    satIOContext);

  TI_DBG6(("satStartResetDevice: OS satIOContext %p \n", satIOContext));
  TI_DBG6(("satStartResetDevice: TD satNewIOContext %p \n", satNewIOContext));
  TI_DBG6(("satStartResetDevice: OS tiScsiXchg %p \n", satIOContext->tiScsiXchg));
  TI_DBG6(("satStartResetDevice: TD tiScsiXchg %p \n", satNewIOContext->tiScsiXchg));

  TI_DBG6(("satStartResetDevice: satNewIOContext %p \n", satNewIOContext));

  /* ATAPI devices take DEVICE RESET; ATA devices take the SRST sequence. */
  if (satDevData->satDeviceType == SATA_ATAPI_DEVICE)
  {
    status = satDeviceReset(tiRoot,
                            &satIntIo->satIntTiIORequest, /* New tiIORequest */
                            tiDeviceHandle,
                            satNewIOContext->tiScsiXchg, /* New tiScsiInitiatorRequest_t *tiScsiRequest, */
                            satNewIOContext);
  }
  else
  {
    status = satResetDevice(tiRoot,
                            &satIntIo->satIntTiIORequest, /* New tiIORequest */
                            tiDeviceHandle,
                            satNewIOContext->tiScsiXchg, /* New tiScsiInitiatorRequest_t *tiScsiRequest, */
                            satNewIOContext);
  }

  if (status != tiSuccess)
  {
    TI_DBG1(("satStartResetDevice: failed in sending\n"));

    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    if (satIOContext->NotifyOS)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          currentTaskTag );
    }
    return tiError;
  }

  TI_DBG6(("satStartResetDevice: end\n"));

  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for satResetDevice.
 *
 *  SAT implementation for building SRT FIS and sends the request to LL layer.
 *
 *  \param  tiRoot:          Pointer to TISA initiator driver/port instance.
 *  \param  tiIORequest:     Pointer to TISA I/O request context for this I/O.
 *  \param  tiDeviceHandle:  Pointer to TISA device handle for this I/O.
 *  \param  tiScsiRequest:   Pointer to TISA SCSI I/O request and SGL list.
 *  \param  satIOContext_t:  Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
/* create any fis and set SRST bit in device control */
GLOBAL bit32  satResetDevice(
                            tiRoot_t                  *tiRoot,
                            tiIORequest_t             *tiIORequest,
                            tiDeviceHandle_t          *tiDeviceHandle,
                            tiScsiInitiatorRequest_t  *tiScsiRequest,
                            satIOContext_t            *satIOContext
                            )
{
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t  *fis;
#ifdef  TD_DEBUG_ENABLE
  tdIORequestBody_t         *tdIORequestBody;
  satInternalIo_t           *satIntIoContext;
#endif

  fis = satIOContext->pFis;

  TI_DBG2(("satResetDevice: start\n"));

#ifdef  TD_DEBUG_ENABLE
  satIntIoContext = satIOContext->satIntIoContext;
  tdIORequestBody = satIntIoContext->satIntRequestBody;
#endif
  /* NOTE(review): tdIORequestBody exists only under TD_DEBUG_ENABLE; the
   * TI_DBG5 use below presumably compiles out with the debug macros. */
  TI_DBG5(("satResetDevice: satIOContext %p tdIORequestBody %p\n", satIOContext, tdIORequestBody));

  /* any fis should work: only the SRST bit in the device control field
   * matters for AGSA_SATA_PROTOCOL_SRST_ASSERT */
  fis->h.fisType        = 0x27;  /* Reg host to device */
  fis->h.c_pmPort       = 0;     /* C Bit is not set */
  fis->h.command        = 0;     /* any command */
  fis->h.features       = 0;     /* FIS reserve */
  fis->d.lbaLow         = 0;     /* FIS LBA (7 :0 ) */
  fis->d.lbaMid         = 0;     /* FIS LBA (15:8 ) */
  fis->d.lbaHigh        = 0;     /* FIS LBA (23:16) */
  fis->d.device         = 0;     /* FIS LBA mode  */
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;     /* FIS sector count (7:0) */
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0x4;   /* SRST bit is set */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_SRST_ASSERT;

  satIOContext->satCompleteCB = &satResetDeviceCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

#ifdef TD_INTERNAL_DEBUG
  tdhexdump("satResetDevice", (bit8 *)satIOContext->pFis, sizeof(agsaFisRegHostToDevice_t));
#ifdef  TD_DEBUG_ENABLE
  tdhexdump("satResetDevice LL", (bit8 *)&(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev), sizeof(agsaFisRegHostToDevice_t));
#endif
#endif

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  TI_DBG6(("satResetDevice: end status %d\n", status));
  return status;
}

/*****************************************************************************
*! \brief  satResetDeviceCB
*
*   This routine is a callback function called from ossaSATACompleted().
*   This CB routine deals with SRT completion. This function send DSRT
*
*  \param   agRoot:      Handles for this instance of SAS/SATA hardware
*  \param   agIORequest: Pointer to the LL I/O request context for this I/O.
*  \param   agIOStatus:  Status of completed I/O.
*  \param   agFirstDword:Pointer to the four bytes of FIS.
*  \param   agIOInfoLen: Length in bytes of overrun/underrun residual or FIS
*                        length.
*  \param   agParam:     Additional info based on status.
*  \param   ioContext:   Pointer to satIOContext_t.
* * \return: none * *****************************************************************************/ GLOBAL void satResetDeviceCB( agsaRoot_t *agRoot, agsaIORequest_t *agIORequest, bit32 agIOStatus, agsaFisHeader_t *agFirstDword, bit32 agIOInfoLen, agsaFrameHandle_t agFrameHandle, void *ioContext ) { /* callback for satResetDevice */ tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; tdIORequestBody_t *tdIORequestBody; tdIORequestBody_t *tdOrgIORequestBody; satIOContext_t *satIOContext; satIOContext_t *satOrgIOContext; satIOContext_t *satNewIOContext; satInternalIo_t *satIntIo; satInternalIo_t *satNewIntIo = agNULL; satDeviceData_t *satDevData; tiIORequest_t *tiOrgIORequest; #ifdef TD_DEBUG_ENABLE bit32 ataStatus = 0; bit32 ataError; agsaFisPioSetupHeader_t *satPIOSetupHeader = agNULL; #endif bit32 status; TI_DBG1(("satResetDeviceCB: start\n")); TI_DBG6(("satResetDeviceCB: agIORequest=%p agIOStatus=0x%x agIOInfoLen %d\n", agIORequest, agIOStatus, agIOInfoLen)); tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData; satIOContext = (satIOContext_t *) ioContext; satIntIo = satIOContext->satIntIoContext; satDevData = satIOContext->pSatDevData; if (satIntIo == agNULL) { TI_DBG6(("satResetDeviceCB: External, OS generated\n")); satOrgIOContext = satIOContext; tiOrgIORequest = tdIORequestBody->tiIORequest; } else { TI_DBG6(("satResetDeviceCB: Internal, TD generated\n")); satOrgIOContext = satIOContext->satOrgIOContext; if (satOrgIOContext == agNULL) { TI_DBG6(("satResetDeviceCB: satOrgIOContext is NULL, wrong\n")); return; } else { TI_DBG6(("satResetDeviceCB: satOrgIOContext is NOT NULL\n")); } tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody; tiOrgIORequest = (tiIORequest_t *)tdOrgIORequestBody->tiIORequest; } tdIORequestBody->ioCompleted = agTRUE; 
tdIORequestBody->ioStarted = agFALSE; if (agFirstDword == agNULL && agIOStatus != OSSA_IO_SUCCESS) { TI_DBG1(("satResetDeviceCB: wrong. agFirstDword is NULL when error, status %d\n", agIOStatus)); if (satOrgIOContext->NotifyOS == agTRUE) { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, tiOrgIORequest ); } satDevData->satTmTaskTag = agNULL; satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext); satFreeIntIoResource( tiRoot, satDevData, satIntIo); return; } if (agIOStatus == OSSA_IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_ZONE_VIOLATION || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BREAK || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BAD_DESTINATION || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_WRONG_DESTINATION || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_UNKNOWN_ERROR || agIOStatus == OSSA_IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY ) { TI_DBG1(("satResetDeviceCB: OSSA_IO_OPEN_CNX_ERROR\n")); if (satOrgIOContext->NotifyOS == agTRUE) { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, tiOrgIORequest ); } satDevData->satTmTaskTag = agNULL; satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext); satFreeIntIoResource( tiRoot, satDevData, satIntIo); return; } if (agIOStatus != OSSA_IO_SUCCESS) { #ifdef TD_DEBUG_ENABLE /* only agsaFisPioSetup_t is expected */ satPIOSetupHeader = (agsaFisPioSetupHeader_t *)&(agFirstDword->PioSetup); ataStatus = satPIOSetupHeader->status; /* ATA Status register */ ataError = satPIOSetupHeader->error; /* ATA Eror register */ #endif TI_DBG1(("satResetDeviceCB: ataStatus 0x%x ataError 0x%x\n", ataStatus, ataError)); if (satOrgIOContext->NotifyOS == agTRUE) { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, tiOrgIORequest ); } satDevData->satTmTaskTag = agNULL; satDecrementPendingIO(tiRoot, tdsaAllShared, 
satIOContext); satFreeIntIoResource( tiRoot, satDevData, satIntIo); return; } /* success */ satNewIntIo = satAllocIntIoResource( tiRoot, tiOrgIORequest, satDevData, 0, satNewIntIo); if (satNewIntIo == agNULL) { satDevData->satTmTaskTag = agNULL; satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext); /* memory allocation failure */ satFreeIntIoResource( tiRoot, satDevData, satNewIntIo); if (satOrgIOContext->NotifyOS == agTRUE) { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, tiOrgIORequest ); } TI_DBG1(("satResetDeviceCB: momory allocation fails\n")); return; } /* end of memory allocation failure */ /* * Need to initialize all the fields within satIOContext */ satNewIOContext = satPrepareNewIO( satNewIntIo, tiOrgIORequest, satDevData, agNULL, satOrgIOContext ); /* send AGSA_SATA_PROTOCOL_SRST_DEASSERT */ status = satDeResetDevice(tiRoot, tiOrgIORequest, satOrgIOContext->ptiDeviceHandle, agNULL, satNewIOContext ); if (status != tiSuccess) { if (satOrgIOContext->NotifyOS == agTRUE) { ostiInitiatorEvent( tiRoot, NULL, NULL, tiIntrEventTypeTaskManagement, tiTMFailed, tiOrgIORequest ); } /* sending AGSA_SATA_PROTOCOL_SRST_DEASSERT fails */ satDevData->satTmTaskTag = agNULL; satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext); satFreeIntIoResource( tiRoot, satDevData, satNewIntIo); return; } satDevData->satTmTaskTag = agNULL; satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext); satFreeIntIoResource( tiRoot, satDevData, satIntIo); TI_DBG5(("satResetDeviceCB: device %p pending IO %d\n", satDevData, satDevData->satPendingIO)); TI_DBG6(("satResetDeviceCB: end\n")); return; } /*****************************************************************************/ /*! \brief SAT implementation for satDeResetDevice. * * SAT implementation for building DSRT FIS and sends the request to LL layer. * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param tiIORequest: Pointer to TISA I/O request context for this I/O. 
* \param tiDeviceHandle: Pointer to TISA device handle for this I/O. * \param tiScsiRequest: Pointer to TISA SCSI I/O request and SGL list. * \param satIOContext_t: Pointer to the SAT IO Context * * \return If command is started successfully * - \e tiSuccess: I/O request successfully initiated. * - \e tiBusy: No resources available, try again later. * - \e tiIONoDevice: Invalid device handle. * - \e tiError: Other errors. */ /*****************************************************************************/ GLOBAL bit32 satDeResetDevice( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext ) { bit32 status; bit32 agRequestType; agsaFisRegHostToDevice_t *fis; #ifdef TD_DEBUG_ENABLE tdIORequestBody_t *tdIORequestBody; satInternalIo_t *satIntIoContext; #endif fis = satIOContext->pFis; TI_DBG6(("satDeResetDevice: start\n")); #ifdef TD_DEBUG_ENABLE satIntIoContext = satIOContext->satIntIoContext; tdIORequestBody = satIntIoContext->satIntRequestBody; TI_DBG5(("satDeResetDevice: satIOContext %p tdIORequestBody %p\n", satIOContext, tdIORequestBody)); #endif /* any fis should work */ fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0; /* C Bit is not set */ fis->h.command = 0; /* any command */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.device = 0; /* FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* SRST bit is not set */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_SRST_DEASSERT; satIOContext->satCompleteCB = &satDeResetDeviceCB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ #ifdef TD_INTERNAL_DEBUG tdhexdump("satDeResetDevice", (bit8 *)satIOContext->pFis, sizeof(agsaFisRegHostToDevice_t)); #ifdef TD_DEBUG_ENABLE tdhexdump("satDeResetDevice LL", (bit8 *)&(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev), sizeof(agsaFisRegHostToDevice_t)); #endif #endif status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG6(("satDeResetDevice: end status %d\n", status)); return status; } /***************************************************************************** *! \brief satDeResetDeviceCB * * This routine is a callback function called from ossaSATACompleted(). * This CB routine deals with DSRT completion. * * \param agRoot: Handles for this instance of SAS/SATA hardware * \param agIORequest: Pointer to the LL I/O request context for this I/O. * \param agIOStatus: Status of completed I/O. * \param agFirstDword:Pointer to the four bytes of FIS. * \param agIOInfoLen: Length in bytes of overrun/underrun residual or FIS * length. * \param agParam: Additional info based on status. * \param ioContext: Pointer to satIOContext_t. 
*
*  \return: none
*
*****************************************************************************/
GLOBAL void satDeResetDeviceCB(
                               agsaRoot_t        *agRoot,
                               agsaIORequest_t   *agIORequest,
                               bit32             agIOStatus,
                               agsaFisHeader_t   *agFirstDword,
                               bit32             agIOInfoLen,
                               agsaFrameHandle_t agFrameHandle,
                               void              *ioContext
                               )
{
  /* callback for satDeResetDevice; completes the task-management sequence
     that satResetDeviceCB started, notifying the OS of TM success/failure. */
  tdsaRootOsData_t        *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t                *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t              *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t           *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdIORequestBody_t       *tdIORequestBody;
  tdIORequestBody_t       *tdOrgIORequestBody = agNULL;
  satIOContext_t          *satIOContext;
  satIOContext_t          *satOrgIOContext;
  satInternalIo_t         *satIntIo;
  satDeviceData_t         *satDevData;
  tiIORequest_t           *tiOrgIORequest;
#ifdef  TD_DEBUG_ENABLE
  bit32                     ataStatus = 0;
  bit32                     ataError;
  agsaFisPioSetupHeader_t  *satPIOSetupHeader = agNULL;
#endif
  bit32                     report = agFALSE;
  bit32                     AbortTM = agFALSE;

  TI_DBG1(("satDeResetDeviceCB: start\n"));
  TI_DBG6(("satDeResetDeviceCB: agIORequest=%p agIOStatus=0x%x agIOInfoLen %d\n", agIORequest, agIOStatus, agIOInfoLen));
  tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  satIOContext    = (satIOContext_t *) ioContext;
  satIntIo        = satIOContext->satIntIoContext;
  satDevData      = satIOContext->pSatDevData;

  /* Determine originating context: OS-generated vs TD-internal request. */
  if (satIntIo == agNULL)
  {
    TI_DBG6(("satDeResetDeviceCB: External, OS generated\n"));
    satOrgIOContext = satIOContext;
    tiOrgIORequest  = tdIORequestBody->tiIORequest;
  }
  else
  {
    TI_DBG6(("satDeResetDeviceCB: Internal, TD generated\n"));
    satOrgIOContext = satIOContext->satOrgIOContext;
    if (satOrgIOContext == agNULL)
    {
      TI_DBG6(("satDeResetDeviceCB: satOrgIOContext is NULL, wrong\n"));
      return;
    }
    else
    {
      TI_DBG6(("satDeResetDeviceCB: satOrgIOContext is NOT NULL\n"));
    }
    tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody;
    tiOrgIORequest     = (tiIORequest_t *)tdOrgIORequestBody->tiIORequest;
  }

  tdIORequestBody->ioCompleted = agTRUE;
  tdIORequestBody->ioStarted = agFALSE;

  /* Error path: no FIS was returned for a failed I/O. */
  if (agFirstDword == agNULL && agIOStatus != OSSA_IO_SUCCESS)
  {
    TI_DBG1(("satDeResetDeviceCB: wrong. agFirstDword is NULL when error, status %d\n", agIOStatus));
    if (satOrgIOContext->NotifyOS == agTRUE)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          tiOrgIORequest );
    }
    satDevData->satTmTaskTag = agNULL;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return;
  }

  /* Error path: open-connection class errors reported by the LL layer. */
  if (agIOStatus == OSSA_IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_ZONE_VIOLATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BREAK ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BAD_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_WRONG_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_UNKNOWN_ERROR ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY )
  {
    TI_DBG1(("satDeResetDeviceCB: OSSA_IO_OPEN_CNX_ERROR\n"));
    if (satOrgIOContext->NotifyOS == agTRUE)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          tiOrgIORequest );
    }
    satDevData->satTmTaskTag = agNULL;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return;
  }

  /* Error path: any other non-success status. */
  if (agIOStatus != OSSA_IO_SUCCESS)
  {
#ifdef  TD_DEBUG_ENABLE
    /* only agsaFisPioSetup_t is expected */
    satPIOSetupHeader = (agsaFisPioSetupHeader_t *)&(agFirstDword->PioSetup);
    ataStatus     = satPIOSetupHeader->status;   /* ATA Status register */
    ataError      = satPIOSetupHeader->error;    /* ATA Eror register   */
#endif
    /* NOTE(review): ataStatus/ataError exist only under TD_DEBUG_ENABLE;
       presumably TI_DBG1 compiles out otherwise — confirm macro defs. */
    TI_DBG1(("satDeResetDeviceCB: ataStatus 0x%x ataError 0x%x\n", ataStatus, ataError));
    if (satOrgIOContext->NotifyOS == agTRUE)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          tiOrgIORequest );
    }
    satDevData->satTmTaskTag = agNULL;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return;
  }

  /* success */
  TI_DBG1(("satDeResetDeviceCB: success \n"));
  TI_DBG1(("satDeResetDeviceCB: TMF %d\n", satOrgIOContext->TMF));

  if (satOrgIOContext->TMF == AG_ABORT_TASK)
  {
    AbortTM = agTRUE;
  }
  if (satOrgIOContext->NotifyOS == agTRUE)
  {
    report = agTRUE;
  }
  /* For ABORT TASK, also abort the target I/O now that the drive is reset. */
  if (AbortTM == agTRUE)
  {
    TI_DBG1(("satDeResetDeviceCB: calling satAbort\n"));
    satAbort(agRoot, satOrgIOContext->satToBeAbortedIOContext);
  }
  satDevData->satTmTaskTag = agNULL;
  satDevData->satDriveState = SAT_DEV_STATE_NORMAL;
  satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
  TI_DBG1(("satDeResetDeviceCB: satPendingIO %d satNCQMaxIO %d\n", satDevData->satPendingIO, satDevData->satNCQMaxIO ));
  TI_DBG1(("satDeResetDeviceCB: satPendingNCQIO %d satPendingNONNCQIO %d\n", satDevData->satPendingNCQIO, satDevData->satPendingNONNCQIO));
  satFreeIntIoResource( tiRoot,
                        satDevData,
                        satIntIo);
  /* clean up TD layer's IORequestBody */
  if (tdOrgIORequestBody != agNULL)
  {
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
  }
  else
  {
    TI_DBG1(("satDeResetDeviceCB: tdOrgIORequestBody is NULL, wrong\n"));
  }
  if (report)
  {
    ostiInitiatorEvent( tiRoot,
                        NULL,
                        NULL,
                        tiIntrEventTypeTaskManagement,
                        tiTMOK,
                        tiOrgIORequest );
  }

  TI_DBG5(("satDeResetDeviceCB: device %p pending IO %d\n", satDevData, satDevData->satPendingIO));
  TI_DBG6(("satDeResetDeviceCB: end\n"));
  return;
}

/*****************************************************************************/
/*! \brief SAT implementation for satStartCheckPowerMode.
*
*  SAT implementation for abort task management for non-ncq sata disk.
*  This function sends CHECK POWER MODE
*
*  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
*  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
*  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
*  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
*  \param   satIOContext_t:   Pointer to the SAT IO Context
*
*  \return If command is started successfully
*    - \e tiSuccess:     I/O request successfully initiated.
*    - \e tiBusy:        No resources available, try again later.
*    - \e tiIONoDevice:  Invalid device handle.
*    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satStartCheckPowerMode(
                                    tiRoot_t                  *tiRoot,
                                    tiIORequest_t             *tiIORequest,
                                    tiDeviceHandle_t          *tiDeviceHandle,
                                    tiScsiInitiatorRequest_t  *tiScsiRequest, /* NULL */
                                    satIOContext_t            *satIOContext
                                    )
{
  satInternalIo_t           *satIntIo = agNULL;
  satDeviceData_t           *satDevData = agNULL;
  satIOContext_t            *satNewIOContext;
  bit32                     status;
  tiIORequest_t             *currentTaskTag = agNULL;

  TI_DBG6(("satStartCheckPowerMode: start\n"));

  currentTaskTag = tiIORequest;
  satDevData = satIOContext->pSatDevData;

  TI_DBG6(("satStartCheckPowerMode: before alloc\n"));
  /* allocate any fis for seting SRT bit in device control */
  satIntIo = satAllocIntIoResource( tiRoot,
                                    tiIORequest,
                                    satDevData,
                                    0,
                                    satIntIo);
  /* NOTE(review): message text looks like a typo for "after alloc" —
     debug-only string, left unchanged here. */
  TI_DBG6(("satStartCheckPowerMode: before after\n"));
  if (satIntIo == agNULL)
  {
    /* Allocation failed: report TM failure to the OS if it is waiting. */
    TI_DBG1(("satStartCheckPowerMode: can't alloacate\n"));
    if (satIOContext->NotifyOS)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          currentTaskTag );
    }
    return tiError;
  }

  /* Derive a new internal IO context chained back to the original one. */
  satNewIOContext = satPrepareNewIO(satIntIo,
                                    tiIORequest,
                                    satDevData,
                                    agNULL,
                                    satIOContext);
  TI_DBG6(("satStartCheckPowerMode: OS satIOContext %p \n", satIOContext));
  TI_DBG6(("satStartCheckPowerMode: TD satNewIOContext %p \n", satNewIOContext));
  TI_DBG6(("satStartCheckPowerMode: OS tiScsiXchg %p \n", satIOContext->tiScsiXchg));
  TI_DBG6(("satStartCheckPowerMode: TD tiScsiXchg %p \n", satNewIOContext->tiScsiXchg));

  TI_DBG1(("satStartCheckPowerMode: satNewIOContext %p \n", satNewIOContext));

  status = satCheckPowerMode(tiRoot,
                             &satIntIo->satIntTiIORequest, /* New tiIORequest */
                             tiDeviceHandle,
                             satNewIOContext->tiScsiXchg, /* New tiScsiInitiatorRequest_t *tiScsiRequest, */
                             satNewIOContext);

  if (status != tiSuccess)
  {
    /* Send failed: release the internal IO and notify the OS of TM failure. */
    TI_DBG1(("satStartCheckPowerMode: failed in sending\n"));
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    if (satIOContext->NotifyOS)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          currentTaskTag );
    }
    return tiError;
  }

  TI_DBG6(("satStartCheckPowerMode: end\n"));
  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for satCheckPowerMode.
*
*  This function creates CHECK POWER MODE fis and sends the request to LL layer
*
*  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
*  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
*  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
*  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
*  \param   satIOContext_t:   Pointer to the SAT IO Context
*
*  \return If command is started successfully
*    - \e tiSuccess:     I/O request successfully initiated.
*    - \e tiBusy:        No resources available, try again later.
*    - \e tiIONoDevice:  Invalid device handle.
*    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satCheckPowerMode(
                               tiRoot_t                  *tiRoot,
                               tiIORequest_t             *tiIORequest,
                               tiDeviceHandle_t          *tiDeviceHandle,
                               tiScsiInitiatorRequest_t  *tiScsiRequest,
                               satIOContext_t            *satIOContext
                               )
{
  /*
    sends SAT_CHECK_POWER_MODE as a part of ABORT TASKMANGEMENT for NCQ commands
    internally generated - no directly corresponding scsi
  */
  bit32                     status;
  bit32                     agRequestType;
  agsaFisRegHostToDevice_t  *fis;

  fis = satIOContext->pFis;
  TI_DBG5(("satCheckPowerMode: start\n"));
  /*
   * Send the ATA CHECK POWER MODE command.
   */
  fis->h.fisType        = 0x27;                   /* Reg host to device */
  fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
  fis->h.command        = SAT_CHECK_POWER_MODE;   /* 0xE5 */
  fis->h.features       = 0;
  fis->d.lbaLow         = 0;
  fis->d.lbaMid         = 0;
  fis->d.lbaHigh        = 0;
  fis->d.device         = 0;
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0;                      /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  agRequestType = AGSA_SATA_PROTOCOL_NON_DATA;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &satCheckPowerModeCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  TI_DBG5(("satCheckPowerMode: return\n"));

  return status;
}

/*****************************************************************************
*! \brief  satCheckPowerModeCB
*
*   This routine is a callback function called from ossaSATACompleted().
*   This CB routine deals with CHECK POWER MODE completion as abort task
*   management.
*
*  \param   agRoot:      Handles for this instance of SAS/SATA hardware
*  \param   agIORequest: Pointer to the LL I/O request context for this I/O.
*  \param   agIOStatus:  Status of completed I/O.
*  \param   agFirstDword:Pointer to the four bytes of FIS.
*  \param   agIOInfoLen: Length in bytes of overrun/underrun residual or FIS
*                        length.
*  \param   agParam:     Additional info based on status.
*  \param   ioContext:   Pointer to satIOContext_t.
*
*  \return: none
*
*****************************************************************************/
GLOBAL void satCheckPowerModeCB(
                                agsaRoot_t        *agRoot,
                                agsaIORequest_t   *agIORequest,
                                bit32             agIOStatus,
                                agsaFisHeader_t   *agFirstDword,
                                bit32             agIOInfoLen,
                                agsaFrameHandle_t agFrameHandle,
                                void              *ioContext
                                )
{
  /* callback for satCheckPowerMode; completes CHECK POWER MODE issued as
     part of abort task management and notifies the OS of the TM result. */
  tdsaRootOsData_t        *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t                *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t              *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t           *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdIORequestBody_t       *tdIORequestBody;
  tdIORequestBody_t       *tdOrgIORequestBody = agNULL;
  satIOContext_t          *satIOContext;
  satIOContext_t          *satOrgIOContext;
  satInternalIo_t         *satIntIo;
  satDeviceData_t         *satDevData;
  tiIORequest_t           *tiOrgIORequest;
#ifdef  TD_DEBUG_ENABLE
  bit32                     ataStatus = 0;
  bit32                     ataError;
  agsaFisPioSetupHeader_t  *satPIOSetupHeader = agNULL;
#endif
  bit32                     report = agFALSE;
  bit32                     AbortTM = agFALSE;

  TI_DBG1(("satCheckPowerModeCB: start\n"));
  TI_DBG1(("satCheckPowerModeCB: agIORequest=%p agIOStatus=0x%x agIOInfoLen %d\n", agIORequest, agIOStatus, agIOInfoLen));
  tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  satIOContext    = (satIOContext_t *) ioContext;
  satIntIo        = satIOContext->satIntIoContext;
  satDevData      = satIOContext->pSatDevData;

  /* Determine originating context: OS-generated vs TD-internal request. */
  if (satIntIo == agNULL)
  {
    TI_DBG6(("satCheckPowerModeCB: External, OS generated\n"));
    satOrgIOContext = satIOContext;
    tiOrgIORequest  = tdIORequestBody->tiIORequest;
  }
  else
  {
    TI_DBG6(("satCheckPowerModeCB: Internal, TD generated\n"));
    satOrgIOContext = satIOContext->satOrgIOContext;
    if (satOrgIOContext == agNULL)
    {
      TI_DBG6(("satCheckPowerModeCB: satOrgIOContext is NULL, wrong\n"));
      return;
    }
    else
    {
      TI_DBG6(("satCheckPowerModeCB: satOrgIOContext is NOT NULL\n"));
    }
    tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody;
    tiOrgIORequest     = (tiIORequest_t *)tdOrgIORequestBody->tiIORequest;
  }

  tdIORequestBody->ioCompleted = agTRUE;
  tdIORequestBody->ioStarted = agFALSE;

  /* Error path: no FIS was returned for a failed I/O. */
  if (agFirstDword == agNULL && agIOStatus != OSSA_IO_SUCCESS)
  {
    TI_DBG1(("satCheckPowerModeCB: wrong. agFirstDword is NULL when error, status %d\n", agIOStatus));
    if (satOrgIOContext->NotifyOS == agTRUE)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          tiOrgIORequest );
    }
    satDevData->satTmTaskTag = agNULL;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return;
  }

  /* Error path: open-connection class errors reported by the LL layer. */
  if (agIOStatus == OSSA_IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_ZONE_VIOLATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BREAK ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BAD_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_WRONG_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_UNKNOWN_ERROR ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY )
  {
    TI_DBG1(("satCheckPowerModeCB: OSSA_IO_OPEN_CNX_ERROR\n"));
    if (satOrgIOContext->NotifyOS == agTRUE)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          tiOrgIORequest );
    }
    satDevData->satTmTaskTag = agNULL;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return;
  }

  /* Error path: any other non-success status. */
  if (agIOStatus != OSSA_IO_SUCCESS)
  {
#ifdef  TD_DEBUG_ENABLE
    /* only agsaFisPioSetup_t is expected */
    satPIOSetupHeader = (agsaFisPioSetupHeader_t *)&(agFirstDword->PioSetup);
    ataStatus     = satPIOSetupHeader->status;   /* ATA Status register */
    ataError      = satPIOSetupHeader->error;    /* ATA Eror register   */
#endif
    /* NOTE(review): ataStatus/ataError exist only under TD_DEBUG_ENABLE;
       presumably TI_DBG1 compiles out otherwise — confirm macro defs. */
    TI_DBG1(("satCheckPowerModeCB: ataStatus 0x%x ataError 0x%x\n", ataStatus, ataError));
    if (satOrgIOContext->NotifyOS == agTRUE)
    {
      ostiInitiatorEvent( tiRoot,
                          NULL,
                          NULL,
                          tiIntrEventTypeTaskManagement,
                          tiTMFailed,
                          tiOrgIORequest );
    }
    satDevData->satTmTaskTag = agNULL;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return;
  }

  /* success */
  TI_DBG1(("satCheckPowerModeCB: success\n"));
  TI_DBG1(("satCheckPowerModeCB: TMF %d\n", satOrgIOContext->TMF));

  if (satOrgIOContext->TMF == AG_ABORT_TASK)
  {
    AbortTM = agTRUE;
  }
  if (satOrgIOContext->NotifyOS == agTRUE)
  {
    report = agTRUE;
  }
  /* For ABORT TASK, abort the target I/O now that the drive responded. */
  if (AbortTM == agTRUE)
  {
    TI_DBG1(("satCheckPowerModeCB: calling satAbort\n"));
    satAbort(agRoot, satOrgIOContext->satToBeAbortedIOContext);
  }
  satDevData->satTmTaskTag = agNULL;
  satDevData->satDriveState = SAT_DEV_STATE_NORMAL;
  satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
  TI_DBG1(("satCheckPowerModeCB: satPendingIO %d satNCQMaxIO %d\n", satDevData->satPendingIO, satDevData->satNCQMaxIO ));
  TI_DBG1(("satCheckPowerModeCB: satPendingNCQIO %d satPendingNONNCQIO %d\n", satDevData->satPendingNCQIO, satDevData->satPendingNONNCQIO));
  satFreeIntIoResource( tiRoot,
                        satDevData,
                        satIntIo);
  /* clean up TD layer's IORequestBody */
  if (tdOrgIORequestBody != agNULL)
  {
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
  }
  else
  {
    TI_DBG1(("satCheckPowerModeCB: tdOrgIORequestBody is NULL, wrong\n"));
  }
  if (report)
  {
    ostiInitiatorEvent( tiRoot,
                        NULL,
                        NULL,
                        tiIntrEventTypeTaskManagement,
                        tiTMOK,
                        tiOrgIORequest );
  }

  TI_DBG5(("satCheckPowerModeCB: device %p pending IO %d\n", satDevData, satDevData->satPendingIO));
  TI_DBG2(("satCheckPowerModeCB: end\n"));
  return;
}

/*****************************************************************************/
/*! \brief SAT implementation for satAddSATAStartIDDev.
*
*  This function sends identify device data to find out the uniqueness
*  of device.
*
*  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
*  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
*  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
*  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
*  \param   satIOContext_t:   Pointer to the SAT IO Context
*
*  \return If command is started successfully
*    - \e tiSuccess:     I/O request successfully initiated.
*    - \e tiBusy:        No resources available, try again later.
*    - \e tiIONoDevice:  Invalid device handle.
*    - \e tiError:       Other errors.
*/
/*****************************************************************************/
GLOBAL bit32 satAddSATAStartIDDev(
                                  tiRoot_t                  *tiRoot,
                                  tiIORequest_t             *tiIORequest,
                                  tiDeviceHandle_t          *tiDeviceHandle,
                                  tiScsiInitiatorRequest_t  *tiScsiRequest, // NULL
                                  satIOContext_t            *satIOContext
                                  )
{
  satInternalIo_t           *satIntIo = agNULL;
  satDeviceData_t           *satDevData = agNULL;
  tdIORequestBody_t         *tdIORequestBody;
  satIOContext_t            *satNewIOContext;
  bit32                     status;

  TI_DBG2(("satAddSATAStartIDDev: start\n"));

  satDevData = satIOContext->pSatDevData;

  TI_DBG2(("satAddSATAStartIDDev: before alloc\n"));
  /* allocate identify device command */
  satIntIo = satAllocIntIoResource( tiRoot,
                                    tiIORequest,
                                    satDevData,
                                    sizeof(agsaSATAIdentifyData_t), /* 512; size of identify device data */
                                    satIntIo);
  TI_DBG2(("satAddSATAStartIDDev: after alloc\n"));
  if (satIntIo == agNULL)
  {
    TI_DBG1(("satAddSATAStartIDDev: can't alloacate\n"));
    return tiError;
  }

  /* fill in fields */
  /* real ttttttthe one worked and the same; 5/21/07/ */
  satIntIo->satOrgTiIORequest = tiIORequest; /* changed */
  tdIORequestBody = satIntIo->satIntRequestBody;
  satNewIOContext = &(tdIORequestBody->transport.SATA.satIOContext);

  /* Wire the new internal IO context to the request body's SATA transport
     buffers (FIS, sense, SCSI command) before issuing IDENTIFY DEVICE. */
  satNewIOContext->pSatDevData   = satDevData;
  satNewIOContext->pFis          = &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
  satNewIOContext->pScsiCmnd     = &(satIntIo->satIntTiScsiXchg.scsiCmnd);
  satNewIOContext->pSense        = &(tdIORequestBody->transport.SATA.sensePayload);
  satNewIOContext->pTiSenseData  = &(tdIORequestBody->transport.SATA.tiSenseData);
  satNewIOContext->tiRequestBody = satIntIo->satIntRequestBody; /* key fix */
  satNewIOContext->interruptContext = tiInterruptContext;
  satNewIOContext->satIntIoContext  = satIntIo;
  satNewIOContext->ptiDeviceHandle = agNULL;
  satNewIOContext->satOrgIOContext = satIOContext; /* changed */
  /* this is valid only for TD layer generated (not triggered by OS at all) IO */
  satNewIOContext->tiScsiXchg = &(satIntIo->satIntTiScsiXchg);

  TI_DBG6(("satAddSATAStartIDDev: OS satIOContext %p \n", satIOContext));
  TI_DBG6(("satAddSATAStartIDDev: TD satNewIOContext %p \n", satNewIOContext));
  TI_DBG6(("satAddSATAStartIDDev: OS tiScsiXchg %p \n", satIOContext->tiScsiXchg));
  TI_DBG6(("satAddSATAStartIDDev: TD tiScsiXchg %p \n", satNewIOContext->tiScsiXchg));

  TI_DBG2(("satAddSATAStartIDDev: satNewIOContext %p tdIORequestBody %p\n", satNewIOContext, tdIORequestBody));

  status = satAddSATASendIDDev( tiRoot,
                                &satIntIo->satIntTiIORequest, /* New tiIORequest */
                                tiDeviceHandle,
                                satNewIOContext->tiScsiXchg, /* New tiScsiInitiatorRequest_t *tiScsiRequest, */
                                satNewIOContext);

  if (status != tiSuccess)
  {
    /* Send failed: release the internal IO resource before returning. */
    TI_DBG1(("satAddSATAStartIDDev: failed in sending\n"));
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return tiError;
  }

  TI_DBG6(("satAddSATAStartIDDev: end\n"));
  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for satAddSATASendIDDev.
*
*  This function creates identify device data fis and send it to LL
*
*  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
*  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
*  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
*  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
*  \param   satIOContext_t:   Pointer to the SAT IO Context
*
*  \return If command is started successfully
*    - \e tiSuccess:     I/O request successfully initiated.
*    - \e tiBusy:        No resources available, try again later.
*    - \e tiIONoDevice:  Invalid device handle.
*    - \e tiError:       Other errors.
*/ /*****************************************************************************/ GLOBAL bit32 satAddSATASendIDDev( tiRoot_t *tiRoot, tiIORequest_t *tiIORequest, tiDeviceHandle_t *tiDeviceHandle, tiScsiInitiatorRequest_t *tiScsiRequest, satIOContext_t *satIOContext) { bit32 status; bit32 agRequestType; satDeviceData_t *pSatDevData; agsaFisRegHostToDevice_t *fis; #ifdef TD_DEBUG_ENABLE tdIORequestBody_t *tdIORequestBody; satInternalIo_t *satIntIoContext; #endif pSatDevData = satIOContext->pSatDevData; fis = satIOContext->pFis; TI_DBG2(("satAddSATASendIDDev: start\n")); #ifdef TD_DEBUG_ENABLE satIntIoContext = satIOContext->satIntIoContext; tdIORequestBody = satIntIoContext->satIntRequestBody; #endif TI_DBG5(("satAddSATASendIDDev: satIOContext %p tdIORequestBody %p\n", satIOContext, tdIORequestBody)); fis->h.fisType = 0x27; /* Reg host to device */ fis->h.c_pmPort = 0x80; /* C Bit is set */ if (pSatDevData->satDeviceType == SATA_ATAPI_DEVICE) fis->h.command = SAT_IDENTIFY_PACKET_DEVICE; /* 0x40 */ else fis->h.command = SAT_IDENTIFY_DEVICE; /* 0xEC */ fis->h.features = 0; /* FIS reserve */ fis->d.lbaLow = 0; /* FIS LBA (7 :0 ) */ fis->d.lbaMid = 0; /* FIS LBA (15:8 ) */ fis->d.lbaHigh = 0; /* FIS LBA (23:16) */ fis->d.device = 0; /* FIS LBA mode */ fis->d.lbaLowExp = 0; fis->d.lbaMidExp = 0; fis->d.lbaHighExp = 0; fis->d.featuresExp = 0; fis->d.sectorCount = 0; /* FIS sector count (7:0) */ fis->d.sectorCountExp = 0; fis->d.reserved4 = 0; fis->d.control = 0; /* FIS HOB bit clear */ fis->d.reserved5 = 0; agRequestType = AGSA_SATA_PROTOCOL_PIO_READ; /* Initialize CB for SATA completion. */ satIOContext->satCompleteCB = &satAddSATAIDDevCB; /* * Prepare SGL and send FIS to LL layer. 
*/ satIOContext->reqType = agRequestType; /* Save it */ #ifdef TD_INTERNAL_DEBUG tdhexdump("satAddSATASendIDDev", (bit8 *)satIOContext->pFis, sizeof(agsaFisRegHostToDevice_t)); #ifdef TD_DEBUG_ENABLE tdhexdump("satAddSATASendIDDev LL", (bit8 *)&(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev), sizeof(agsaFisRegHostToDevice_t)); #endif #endif status = sataLLIOStart( tiRoot, tiIORequest, tiDeviceHandle, tiScsiRequest, satIOContext); TI_DBG2(("satAddSATASendIDDev: end status %d\n", status)); return status; } /***************************************************************************** *! \brief satAddSATAIDDevCB * * This routine is a callback function for satAddSATASendIDDev() * Using Identify Device Data, this function finds whether devicedata is * new or old. If new, add it to the devicelist. * * \param agRoot: Handles for this instance of SAS/SATA hardware * \param agIORequest: Pointer to the LL I/O request context for this I/O. * \param agIOStatus: Status of completed I/O. * \param agFirstDword:Pointer to the four bytes of FIS. * \param agIOInfoLen: Length in bytes of overrun/underrun residual or FIS * length. * \param agParam: Additional info based on status. * \param ioContext: Pointer to satIOContext_t. 
 *
 *  \return: none
 *
 *****************************************************************************/
void satAddSATAIDDevCB(
                       agsaRoot_t        *agRoot,
                       agsaIORequest_t   *agIORequest,
                       bit32             agIOStatus,
                       agsaFisHeader_t   *agFirstDword,
                       bit32             agIOInfoLen,
                       void              *agParam,
                       void              *ioContext
                       )
{
  /* In the process of Inquiry Process SAT_IDENTIFY_DEVICE */
  tdsaRootOsData_t        *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t                *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t              *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t           *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdIORequestBody_t       *tdIORequestBody;
  tdIORequestBody_t       *tdOrgIORequestBody;
  satIOContext_t          *satIOContext;
  satIOContext_t          *satOrgIOContext;
  satIOContext_t          *satNewIOContext;
  satInternalIo_t         *satIntIo;
  satInternalIo_t         *satNewIntIo = agNULL;
  satDeviceData_t         *satDevData;
  tiIORequest_t           *tiOrgIORequest = agNULL;
  agsaSATAIdentifyData_t  *pSATAIdData;
  bit16                   *tmpptr, tmpptr_tmp;
  bit32                   x;
  tdsaDeviceData_t        *NewOneDeviceData = agNULL;
  tdsaDeviceData_t        *oneDeviceData = agNULL;
  tdList_t                *DeviceListList;
  int                     new_device = agTRUE;
  bit8                    PhyID;
  void                    *sglVirtualAddr;
  bit32                   retry_status;
  agsaContext_t           *agContext;
  tdsaPortContext_t       *onePortContext;
  bit32                   status = 0;

  TI_DBG2(("satAddSATAIDDevCB: start\n"));
  TI_DBG6(("satAddSATAIDDevCB: agIORequest=%p agIOStatus=0x%x agIOInfoLen %d\n", agIORequest, agIOStatus, agIOInfoLen));

  tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  satIOContext    = (satIOContext_t *) ioContext;
  satIntIo        = satIOContext->satIntIoContext;
  satDevData      = satIOContext->pSatDevData;
  NewOneDeviceData = (tdsaDeviceData_t *)tdIORequestBody->tiDevHandle->tdData;
  TI_DBG2(("satAddSATAIDDevCB: NewOneDeviceData %p did %d\n", NewOneDeviceData, NewOneDeviceData->id));
  PhyID = NewOneDeviceData->phyID;
  TI_DBG2(("satAddSATAIDDevCB: phyID %d\n", PhyID));
  agContext = &(NewOneDeviceData->agDeviceResetContext);
  agContext->osData = agNULL;

  if (satIntIo == agNULL)
  {
    /*
     * No internal IO context: the request was externally (OS) generated.
     * This path is not expected for IDENTIFY during add-device; tear the
     * device down and still notify link-up so discovery can proceed.
     */
    TI_DBG1(("satAddSATAIDDevCB: External, OS generated\n"));
    TI_DBG1(("satAddSATAIDDevCB: Not possible case\n"));
    satOrgIOContext = satIOContext;
    tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody;

    tdsaAbortAll(tiRoot, agRoot, NewOneDeviceData);

    /* put onedevicedata back to free list */
    osti_memset(&(NewOneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t));
    TDLIST_DEQUEUE_THIS(&(NewOneDeviceData->MainLink));
    TDLIST_ENQUEUE_AT_TAIL(&(NewOneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));

    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot, satDevData, satIntIo);

    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );

    /* notifying link up */
    ostiPortEvent (
                   tiRoot,
                   tiPortLinkUp,
                   tiSuccess,
                   (void *)tdsaAllShared->Ports[PhyID].tiPortalContext
                   );
#ifdef INITIATOR_DRIVER
    /* triggers discovery */
    ostiPortEvent(
                  tiRoot,
                  tiPortDiscoveryReady,
                  tiSuccess,
                  (void *) tdsaAllShared->Ports[PhyID].tiPortalContext
                  );
#endif
    return;
  }
  else
  {
    TI_DBG1(("satAddSATAIDDevCB: Internal, TD generated\n"));
    satOrgIOContext = satIOContext->satOrgIOContext;
    if (satOrgIOContext == agNULL)
    {
      TI_DBG6(("satAddSATAIDDevCB: satOrgIOContext is NULL\n"));
      return;
    }
    else
    {
      TI_DBG6(("satAddSATAIDDevCB: satOrgIOContext is NOT NULL\n"));
      tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody;
      /* IDENTIFY payload landed in the internal exchange's SGL buffer */
      sglVirtualAddr = satIntIo->satIntTiScsiXchg.sglVirtualAddr;
    }
  }

  tiOrgIORequest = tdIORequestBody->tiIORequest;
  tdIORequestBody->ioCompleted = agTRUE;
  tdIORequestBody->ioStarted = agFALSE;

  TI_DBG2(("satAddSATAIDDevCB: satOrgIOContext->pid %d\n", satOrgIOContext->pid));

  /* protect against double completion for old port */
  if (satOrgIOContext->pid != tdsaAllShared->Ports[PhyID].portContext->id)
  {
    TI_DBG2(("satAddSATAIDDevCB: incorrect pid\n"));
    TI_DBG2(("satAddSATAIDDevCB: satOrgIOContext->pid %d\n", satOrgIOContext->pid));
    TI_DBG2(("satAddSATAIDDevCB: tiPortalContext pid %d\n", tdsaAllShared->Ports[PhyID].portContext->id));

    tdsaAbortAll(tiRoot, agRoot, NewOneDeviceData);

    /* put onedevicedata back to free list */
    osti_memset(&(NewOneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t));
    TDLIST_DEQUEUE_THIS(&(NewOneDeviceData->MainLink));
    TDLIST_ENQUEUE_AT_TAIL(&(NewOneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));

    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot, satDevData, satIntIo);

    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    /* no notification to OS layer */
    return;
  }

  /* completion after portcontext is invalidated */
  onePortContext = NewOneDeviceData->tdPortContext;
  if (onePortContext != agNULL)
  {
    if (onePortContext->valid == agFALSE)
    {
      TI_DBG1(("satAddSATAIDDevCB: portcontext is invalid\n"));
      TI_DBG1(("satAddSATAIDDevCB: onePortContext->id pid %d\n", onePortContext->id));
      satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
      satFreeIntIoResource( tiRoot, satDevData, satIntIo);
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      /* no notification to OS layer */
      return;
    }
  }
  else
  {
    TI_DBG1(("satAddSATAIDDevCB: onePortContext is NULL!!!\n"));
    return;
  }

  /*
   * Error arm 1: LL reported an error but provided no FIS.
   * Optionally retry (ResetInDiscovery), then either clean up or issue a
   * one-time hard reset on the phy before giving up.
   */
  if (agFirstDword == agNULL && agIOStatus != OSSA_IO_SUCCESS)
  {
    TI_DBG1(("satAddSATAIDDevCB: wrong. agFirstDword is NULL when error, status %d\n", agIOStatus));
    if (tdsaAllShared->ResetInDiscovery != 0 && satDevData->ID_Retries < SATA_ID_DEVICE_DATA_RETRIES)
    {
      /* undo pending-IO accounting before reissuing the same internal IO */
      satDevData->satPendingNONNCQIO--;
      satDevData->satPendingIO--;
      retry_status = sataLLIOStart(tiRoot,
                                   &satIntIo->satIntTiIORequest,
                                   &(NewOneDeviceData->tiDeviceHandle),
                                   satIOContext->tiScsiXchg,
                                   satIOContext);
      if (retry_status != tiSuccess)
      {
        /* simply give up */
        satDevData->ID_Retries = 0;
        satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
        return;
      }
      satDevData->ID_Retries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      return;
    }
    else
    {
      if (tdsaAllShared->ResetInDiscovery == 0)
      {
        satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
      }
      else /* ResetInDiscovery in on */
      {
        /* RESET only one after ID retries */
        if (satDevData->NumOfIDRetries <= 0)
        {
          satDevData->NumOfIDRetries++;
          satDevData->ID_Retries = 0;
          satAddSATAIDDevCBReset(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
          /* send link reset */
          saLocalPhyControl(agRoot,
                            agContext,
                            tdsaRotateQnumber(tiRoot, NewOneDeviceData),
                            PhyID,
                            AGSA_PHY_HARD_RESET,
                            agNULL);
        }
        else
        {
          satDevData->ID_Retries = 0;
          satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
        }
      }
      return;
    }
  }

  /*
   * Error arm 2: open-connection class errors. Same retry/reset/cleanup
   * policy as arm 1.
   */
  if (agIOStatus == OSSA_IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_ZONE_VIOLATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BREAK ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BAD_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_WRONG_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_UNKNOWN_ERROR ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY )
  {
    TI_DBG1(("satAddSATAIDDevCB: OSSA_IO_OPEN_CNX_ERROR\n"));
    if (tdsaAllShared->ResetInDiscovery != 0 && satDevData->ID_Retries < SATA_ID_DEVICE_DATA_RETRIES)
    {
      satDevData->satPendingNONNCQIO--;
      satDevData->satPendingIO--;
      retry_status = sataLLIOStart(tiRoot,
                                   &satIntIo->satIntTiIORequest,
                                   &(NewOneDeviceData->tiDeviceHandle),
                                   satIOContext->tiScsiXchg,
                                   satIOContext);
      if (retry_status != tiSuccess)
      {
        /* simply give up */
        satDevData->ID_Retries = 0;
        satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
        return;
      }
      satDevData->ID_Retries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      return;
    }
    else
    {
      if (tdsaAllShared->ResetInDiscovery == 0)
      {
        satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
      }
      else /* ResetInDiscovery in on */
      {
        /* RESET only one after ID retries */
        if (satDevData->NumOfIDRetries <= 0)
        {
          satDevData->NumOfIDRetries++;
          satDevData->ID_Retries = 0;
          satAddSATAIDDevCBReset(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
          /* send link reset */
          saLocalPhyControl(agRoot,
                            agContext,
                            tdsaRotateQnumber(tiRoot, NewOneDeviceData),
                            PhyID,
                            AGSA_PHY_HARD_RESET,
                            agNULL);
        }
        else
        {
          satDevData->ID_Retries = 0;
          satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
        }
      }
      return;
    }
  }

  /*
   * Error arm 3: any other non-success, or "success" accompanied by a FIS
   * with a non-zero residual/FIS length. Same retry/reset/cleanup policy.
   */
  if ( agIOStatus != OSSA_IO_SUCCESS ||
       (agIOStatus == OSSA_IO_SUCCESS && agFirstDword != agNULL && agIOInfoLen != 0)
     )
  {
    if (tdsaAllShared->ResetInDiscovery != 0 && satDevData->ID_Retries < SATA_ID_DEVICE_DATA_RETRIES)
    {
      satIOContext->pSatDevData->satPendingNONNCQIO--;
      satIOContext->pSatDevData->satPendingIO--;
      retry_status = sataLLIOStart(tiRoot,
                                   &satIntIo->satIntTiIORequest,
                                   &(NewOneDeviceData->tiDeviceHandle),
                                   satIOContext->tiScsiXchg,
                                   satIOContext);
      if (retry_status != tiSuccess)
      {
        /* simply give up */
        satDevData->ID_Retries = 0;
        satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
        return;
      }
      satDevData->ID_Retries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      return;
    }
    else
    {
      if (tdsaAllShared->ResetInDiscovery == 0)
      {
        satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
      }
      else /* ResetInDiscovery in on */
      {
        /* RESET only one after ID retries */
        if (satDevData->NumOfIDRetries <= 0)
        {
          satDevData->NumOfIDRetries++;
          satDevData->ID_Retries = 0;
          satAddSATAIDDevCBReset(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
          /* send link reset */
          saLocalPhyControl(agRoot,
                            agContext,
                            tdsaRotateQnumber(tiRoot, NewOneDeviceData),
                            PhyID,
                            AGSA_PHY_HARD_RESET,
                            agNULL);
        }
        else
        {
          satDevData->ID_Retries = 0;
          satAddSATAIDDevCBCleanup(agRoot, NewOneDeviceData, satIOContext, tdOrgIORequestBody);
        }
      }
      return;
    }
  }

  /* success */
  TI_DBG2(("satAddSATAIDDevCB: Success\n"));

  /* Convert to host endian */
  tmpptr = (bit16*)sglVirtualAddr;
  //tdhexdump("satAddSATAIDDevCB before", (bit8 *)sglVirtualAddr, sizeof(agsaSATAIdentifyData_t));
  for (x=0; x < sizeof(agsaSATAIdentifyData_t)/sizeof(bit16); x++)
  {
    OSSA_READ_LE_16(AGROOT, &tmpptr_tmp, tmpptr, 0);
    *tmpptr = tmpptr_tmp;
    tmpptr++;
    /*Print tmpptr_tmp here for debugging purpose*/
  }
  pSATAIdData = (agsaSATAIdentifyData_t *)sglVirtualAddr;
  //tdhexdump("satAddSATAIDDevCB after", (bit8 *)pSATAIdData, sizeof(agsaSATAIdentifyData_t));

  TI_DBG5(("satAddSATAIDDevCB: OS satOrgIOContext %p \n", satOrgIOContext));
  TI_DBG5(("satAddSATAIDDevCB: TD satIOContext %p \n", satIOContext));
  TI_DBG5(("satAddSATAIDDevCB: OS tiScsiXchg %p \n", satOrgIOContext->tiScsiXchg));
  TI_DBG5(("satAddSATAIDDevCB: TD tiScsiXchg %p \n", satIOContext->tiScsiXchg));

  /*
   * Compare identify device data to the existing list. There is no real
   * unique ID for a SATA device, so serial number, firmware version and
   * model number together are used as the identity.
   */
  DeviceListList = tdsaAllShared->MainDeviceList.flink;
  while (DeviceListList != &(tdsaAllShared->MainDeviceList))
  {
    oneDeviceData = TDLIST_OBJECT_BASE(tdsaDeviceData_t, MainLink, DeviceListList);
    TI_DBG1(("satAddSATAIDDevCB: LOOP oneDeviceData %p did %d\n", oneDeviceData, oneDeviceData->id));
    //tdhexdump("satAddSATAIDDevCB LOOP", (bit8 *)&oneDeviceData->satDevData.satIdentifyData, sizeof(agsaSATAIdentifyData_t));
    if ( oneDeviceData->DeviceType == TD_SATA_DEVICE &&
         (osti_memcmp (oneDeviceData->satDevData.satIdentifyData.serialNumber, pSATAIdData->serialNumber, 20) == 0) &&
         (osti_memcmp (oneDeviceData->satDevData.satIdentifyData.firmwareVersion, pSATAIdData->firmwareVersion, 8) == 0) &&
         (osti_memcmp (oneDeviceData->satDevData.satIdentifyData.modelNumber, pSATAIdData->modelNumber, 40) == 0) )
    {
      TI_DBG2(("satAddSATAIDDevCB: did %d\n", oneDeviceData->id));
      new_device = agFALSE;
      break;
    }
    DeviceListList = DeviceListList->flink;
  }

  if (new_device == agFALSE)
  {
    /*
     * Known device reappeared: refresh its handles from the newly created
     * device data, re-register if needed, then recycle the new entry.
     */
    TI_DBG2(("satAddSATAIDDevCB: old device data\n"));
    oneDeviceData->valid = agTRUE;
    oneDeviceData->valid2 = agTRUE;
    /* save data field from new device data */
    oneDeviceData->agRoot = agRoot;
    oneDeviceData->agDevHandle = NewOneDeviceData->agDevHandle;
    oneDeviceData->agDevHandle->osData = oneDeviceData; /* TD layer */
    oneDeviceData->tdPortContext = NewOneDeviceData->tdPortContext;
    oneDeviceData->phyID = NewOneDeviceData->phyID;

    /* one SATA directly attached device per phy;
       Therefore, deregister then register */
    saDeregisterDeviceHandle(agRoot, agNULL, NewOneDeviceData->agDevHandle, 0);

    if (oneDeviceData->registered == agFALSE)
    {
      TI_DBG2(("satAddSATAIDDevCB: re-registering old device data\n"));
      /* already has old information; just register it again */
      saRegisterNewDevice( /* satAddSATAIDDevCB */
                          agRoot,
                          &oneDeviceData->agContext,
                          tdsaRotateQnumber(tiRoot, oneDeviceData),
                          &oneDeviceData->agDeviceInfo,
                          oneDeviceData->tdPortContext->agPortContext,
                          0
                          );
    }

    //    tdsaAbortAll(tiRoot, agRoot, NewOneDeviceData);
    /* put onedevicedata back to free list */
    osti_memset(&(NewOneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t));
    TDLIST_DEQUEUE_THIS(&(NewOneDeviceData->MainLink));
    TDLIST_ENQUEUE_AT_TAIL(&(NewOneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList));

    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot, satDevData, satIntIo);

    if (satDevData->satDeviceType == SATA_ATAPI_DEVICE)
    {
       /* send the Set Feature ATA command to ATAPI device for enabling PIO and DMA transfer mode*/
       satNewIntIo = satAllocIntIoResource( tiRoot,
                                            tiOrgIORequest,
                                            satDevData,
                                            0,
                                            satNewIntIo);

       if (satNewIntIo == agNULL)
       {
         TI_DBG1(("tdsaDiscoveryStartIDDevCB: momory allocation fails\n"));
         /* clean up TD layer's IORequestBody */
         ostiFreeMemory(
                        tiRoot,
                        tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                        sizeof(tdIORequestBody_t)
                        );
         return;
       } /* end memory allocation */

       satNewIOContext = satPrepareNewIO(satNewIntIo,
                                         tiOrgIORequest,
                                         satDevData,
                                         agNULL,
                                         satOrgIOContext
                                         );
       /* enable PIO mode, then enable Ultra DMA mode in the satSetFeaturesCB callback function*/
       status = satSetFeatures(tiRoot,
                               &satNewIntIo->satIntTiIORequest,
                               satNewIOContext->ptiDeviceHandle,
                               &satNewIntIo->satIntTiScsiXchg, /* orginal from OS layer */
                               satNewIOContext,
                               agFALSE);
       if (status != tiSuccess)
       {
         satFreeIntIoResource( tiRoot, satDevData, satIntIo);
         /* clean up TD layer's IORequestBody */
         ostiFreeMemory(
                        tiRoot,
                        tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                        sizeof(tdIORequestBody_t)
                        );
       }
    }
    else
    {
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      TI_DBG2(("satAddSATAIDDevCB: pid %d\n", tdsaAllShared->Ports[PhyID].portContext->id));
      /* notifying link up */
      ostiPortEvent( tiRoot,
                     tiPortLinkUp,
                     tiSuccess,
                     (void *)tdsaAllShared->Ports[PhyID].tiPortalContext
                     );
#ifdef INITIATOR_DRIVER
      /* triggers discovery */
      ostiPortEvent( tiRoot,
                     tiPortDiscoveryReady,
                     tiSuccess,
                     (void *) tdsaAllShared->Ports[PhyID].tiPortalContext
                     );
#endif
    }
    return;
  }

  /* Genuinely new device: keep the identify data on the new entry */
  TI_DBG2(("satAddSATAIDDevCB: new device data\n"));
  /* copy ID Dev data to satDevData */
  satDevData->satIdentifyData = *pSATAIdData;
  satDevData->IDDeviceValid = agTRUE;
#ifdef TD_INTERNAL_DEBUG
  tdhexdump("satAddSATAIDDevCB ID Dev data",(bit8 *)pSATAIdData, sizeof(agsaSATAIdentifyData_t));
  tdhexdump("satAddSATAIDDevCB Device ID Dev data",(bit8 *)&satDevData->satIdentifyData, sizeof(agsaSATAIdentifyData_t));
#endif
  /* set satDevData fields from IndentifyData */
  satSetDevInfo(satDevData,pSATAIdData);

  satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
  satFreeIntIoResource( tiRoot, satDevData, satIntIo);

  if (satDevData->satDeviceType == SATA_ATAPI_DEVICE)
  {
     /* send the Set Feature ATA command to ATAPI device for enabling PIO and DMA transfer mode*/
     satNewIntIo = satAllocIntIoResource( tiRoot,
                                          tiOrgIORequest,
                                          satDevData,
                                          0,
                                          satNewIntIo);

     if (satNewIntIo == agNULL)
     {
       TI_DBG1(("tdsaDiscoveryStartIDDevCB: momory allocation fails\n"));
       /* clean up TD layer's IORequestBody */
       ostiFreeMemory(
                      tiRoot,
                      tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                      sizeof(tdIORequestBody_t)
                      );
       return;
     } /* end memory allocation */

     satNewIOContext = satPrepareNewIO(satNewIntIo,
                                       tiOrgIORequest,
                                       satDevData,
                                       agNULL,
                                       satOrgIOContext
                                       );
     /* enable PIO mode, then enable Ultra DMA mode in the satSetFeaturesCB callback function*/
     status = satSetFeatures(tiRoot,
                             &satNewIntIo->satIntTiIORequest,
                             satNewIOContext->ptiDeviceHandle,
                             &satNewIntIo->satIntTiScsiXchg, /* orginal from OS layer */
                             satNewIOContext,
                             agFALSE);
     if (status != tiSuccess)
     {
       satFreeIntIoResource( tiRoot, satDevData, satIntIo);
       /* clean up TD layer's IORequestBody */
       ostiFreeMemory(
                      tiRoot,
                      tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                      sizeof(tdIORequestBody_t)
                      );
     }
  }
  else
  {
    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    TI_DBG2(("satAddSATAIDDevCB: pid %d\n", tdsaAllShared->Ports[PhyID].portContext->id));
    /* notifying link up */
    ostiPortEvent (
                   tiRoot,
                   tiPortLinkUp,
                   tiSuccess,
                   (void *)tdsaAllShared->Ports[PhyID].tiPortalContext
                   );
#ifdef INITIATOR_DRIVER
    /* triggers discovery */
    ostiPortEvent(
                  tiRoot,
                  tiPortDiscoveryReady,
                  tiSuccess,
                  (void *) tdsaAllShared->Ports[PhyID].tiPortalContext
                  );
#endif
  }

  TI_DBG2(("satAddSATAIDDevCB: end\n"));
  return;
}

/*****************************************************************************
 *! \brief  satAddSATAIDDevCBReset
 *
 *   This routine cleans up IOs for failed Identify device data
 *
 *  \param   agRoot:           Handles for this instance of SAS/SATA hardware
 *  \param   oneDeviceData:    Pointer to the device data.
 *  \param   ioContext:        Pointer to satIOContext_t.
 *  \param   tdIORequestBody:  Pointer to the request body
 *
 *  \return: none
 *
 *****************************************************************************/
void satAddSATAIDDevCBReset(
                            agsaRoot_t        *agRoot,
                            tdsaDeviceData_t  *oneDeviceData,
                            satIOContext_t    *satIOContext,
                            tdIORequestBody_t *tdIORequestBody
                            )
{
  tdsaRootOsData_t  *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t          *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t        *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t     *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  satInternalIo_t   *satIntIo;
  satDeviceData_t   *satDevData;

  TI_DBG2(("satAddSATAIDDevCBReset: start\n"));

  satIntIo   = satIOContext->satIntIoContext;
  satDevData = satIOContext->pSatDevData;
  /* release pending-IO accounting and the internal IO before the caller
     issues the phy reset */
  satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
  satFreeIntIoResource( tiRoot, satDevData, satIntIo);
  /* clean up TD layer's IORequestBody */
  ostiFreeMemory(
                 tiRoot,
                 tdIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                 sizeof(tdIORequestBody_t)
                 );
  return;
}

/*****************************************************************************
 *! \brief  satAddSATAIDDevCBCleanup
 *
 *   This routine cleans up IOs for failed Identify device data
 *
 *  \param   agRoot:           Handles for this instance of SAS/SATA hardware
 *  \param   oneDeviceData:    Pointer to the device data.
 *  \param   ioContext:        Pointer to satIOContext_t.
* \param tdIORequestBody: Pointer to the request body * * \return: none * *****************************************************************************/ void satAddSATAIDDevCBCleanup( agsaRoot_t *agRoot, tdsaDeviceData_t *oneDeviceData, satIOContext_t *satIOContext, tdIORequestBody_t *tdIORequestBody ) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdsaRoot_t *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData; tdsaContext_t *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared; satInternalIo_t *satIntIo; satDeviceData_t *satDevData; bit8 PhyID; TI_DBG2(("satAddSATAIDDevCBCleanup: start\n")); satIntIo = satIOContext->satIntIoContext; satDevData = satIOContext->pSatDevData; PhyID = oneDeviceData->phyID; tdsaAbortAll(tiRoot, agRoot, oneDeviceData); /* put onedevicedata back to free list */ osti_memset(&(oneDeviceData->satDevData.satIdentifyData), 0xFF, sizeof(agsaSATAIdentifyData_t)); TDLIST_DEQUEUE_THIS(&(oneDeviceData->MainLink)); TDLIST_ENQUEUE_AT_TAIL(&(oneDeviceData->FreeLink), &(tdsaAllShared->FreeDeviceList)); satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext); satFreeIntIoResource( tiRoot, satDevData, satIntIo); /* clean up TD layer's IORequestBody */ ostiFreeMemory( tiRoot, tdIORequestBody->IOType.InitiatorTMIO.osMemHandle, sizeof(tdIORequestBody_t) ); /* notifying link up */ ostiPortEvent ( tiRoot, tiPortLinkUp, tiSuccess, (void *)tdsaAllShared->Ports[PhyID].tiPortalContext ); #ifdef INITIATOR_DRIVER /* triggers discovery */ ostiPortEvent( tiRoot, tiPortDiscoveryReady, tiSuccess, (void *) tdsaAllShared->Ports[PhyID].tiPortalContext ); #endif return; } /*****************************************************************************/ /*! \brief SAT implementation for tdsaDiscoveryStartIDDev. * * This function sends identify device data to SATA device in discovery * * * \param tiRoot: Pointer to TISA initiator driver/port instance. 
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   oneDeviceData :   Pointer to the device data.
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 tdsaDiscoveryStartIDDev(tiRoot_t                  *tiRoot,
                                     tiIORequest_t             *tiIORequest, /* agNULL */
                                     tiDeviceHandle_t          *tiDeviceHandle,
                                     tiScsiInitiatorRequest_t  *tiScsiRequest, /* agNULL */
                                     tdsaDeviceData_t          *oneDeviceData
                                     )
{
  void                *osMemHandle;
  tdIORequestBody_t   *tdIORequestBody;
  bit32               PhysUpper32;
  bit32               PhysLower32;
  bit32               memAllocStatus;
  agsaIORequest_t     *agIORequest = agNULL; /* identify device data itself */
  satIOContext_t      *satIOContext = agNULL;
  bit32               status;

  /* allocate tdiorequestbody and call tdsaDiscoveryIntStartIDDev
     tdsaDiscoveryIntStartIDDev(tiRoot, agNULL, tiDeviceHandle, satIOContext);
  */
  TI_DBG3(("tdsaDiscoveryStartIDDev: start\n"));
  TI_DBG3(("tdsaDiscoveryStartIDDev: did %d\n", oneDeviceData->id));

  /* allocation tdIORequestBody and pass it to satTM() */
  memAllocStatus = ostiAllocMemory(
                                   tiRoot,
                                   &osMemHandle,
                                   (void **)&tdIORequestBody,
                                   &PhysUpper32,
                                   &PhysLower32,
                                   8,
                                   sizeof(tdIORequestBody_t),
                                   agTRUE
                                   );
  if (memAllocStatus != tiSuccess)
  {
    TI_DBG1(("tdsaDiscoveryStartIDDev: ostiAllocMemory failed... loc 1\n"));
    return tiError;
  }
  if (tdIORequestBody == agNULL)
  {
    TI_DBG1(("tdsaDiscoveryStartIDDev: ostiAllocMemory returned NULL tdIORequestBody loc 2\n"));
    return tiError;
  }

  /* setup identify device data IO structure */
  tdIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle;
  tdIORequestBody->IOType.InitiatorTMIO.CurrentTaskTag = agNULL;
  tdIORequestBody->IOType.InitiatorTMIO.TaskTag = agNULL;

  /* initialize tiDevhandle */
  tdIORequestBody->tiDevHandle = &(oneDeviceData->tiDeviceHandle);
  tdIORequestBody->tiDevHandle->tdData = oneDeviceData;

  /* initialize tiIORequest */
  tdIORequestBody->tiIORequest = agNULL;

  /* initialize agIORequest */
  agIORequest = &(tdIORequestBody->agIORequest);
  agIORequest->osData = (void *) tdIORequestBody;
  agIORequest->sdkData = agNULL; /* SA takes care of this */

  /* set up satIOContext */
  satIOContext = &(tdIORequestBody->transport.SATA.satIOContext);
  satIOContext->pSatDevData   = &(oneDeviceData->satDevData);
  satIOContext->pFis          = &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
  satIOContext->tiRequestBody = tdIORequestBody;
  satIOContext->ptiDeviceHandle = &(oneDeviceData->tiDeviceHandle);
  satIOContext->tiScsiXchg = agNULL;
  satIOContext->satIntIoContext  = agNULL;
  satIOContext->satOrgIOContext  = agNULL;

  /* followings are used only for internal IO */
  satIOContext->currentLBA = 0;
  satIOContext->OrgTL = 0;
  satIOContext->satToBeAbortedIOContext = agNULL;
  satIOContext->NotifyOS = agFALSE;

  /* saving port ID just in case of full discovery to full discovery transition */
  satIOContext->pid = oneDeviceData->tdPortContext->id;

  osti_memset(&(oneDeviceData->satDevData.satIdentifyData), 0x0, sizeof(agsaSATAIdentifyData_t));

  status = tdsaDiscoveryIntStartIDDev(tiRoot,
                                      tiIORequest, /* agNULL */
                                      tiDeviceHandle, /* &(oneDeviceData->tiDeviceHandle)*/
                                      agNULL,
                                      satIOContext
                                      );
  if (status != tiSuccess)
  {
    TI_DBG1(("tdsaDiscoveryStartIDDev: failed in sending %d\n", status));
    /* on failure, free the request body allocated above */
    ostiFreeMemory(tiRoot, osMemHandle, sizeof(tdIORequestBody_t));
  }
  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for tdsaDiscoveryIntStartIDDev.
 *
 *  This function sends identify device data to SATA device.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 tdsaDiscoveryIntStartIDDev(tiRoot_t                  *tiRoot,
                                        tiIORequest_t             *tiIORequest, /* agNULL */
                                        tiDeviceHandle_t          *tiDeviceHandle,
                                        tiScsiInitiatorRequest_t  *tiScsiRequest, /* agNULL */
                                        satIOContext_t            *satIOContext
                                        )
{
  satInternalIo_t     *satIntIo = agNULL;
  satDeviceData_t     *satDevData = agNULL;
  tdIORequestBody_t   *tdIORequestBody;
  satIOContext_t      *satNewIOContext;
  bit32               status;

  TI_DBG3(("tdsaDiscoveryIntStartIDDev: start\n"));

  satDevData = satIOContext->pSatDevData;

  /* allocate identify device command */
  satIntIo = satAllocIntIoResource( tiRoot,
                                    tiIORequest,
                                    satDevData,
                                    sizeof(agsaSATAIdentifyData_t), /* 512; size of identify device data */
                                    satIntIo);
  if (satIntIo == agNULL)
  {
    TI_DBG2(("tdsaDiscoveryIntStartIDDev: can't alloacate\n"));
    return tiError;
  }

  /* fill in fields */
  /* real ttttttthe one worked and the same; 5/21/07/ */
  satIntIo->satOrgTiIORequest = tiIORequest; /* changed */
  tdIORequestBody = satIntIo->satIntRequestBody;
  satNewIOContext = &(tdIORequestBody->transport.SATA.satIOContext);

  satNewIOContext->pSatDevData   = satDevData;
  satNewIOContext->pFis          = &(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev);
  satNewIOContext->pScsiCmnd     = &(satIntIo->satIntTiScsiXchg.scsiCmnd);
  satNewIOContext->pSense        = &(tdIORequestBody->transport.SATA.sensePayload);
  satNewIOContext->pTiSenseData  = &(tdIORequestBody->transport.SATA.tiSenseData);
  satNewIOContext->tiRequestBody = satIntIo->satIntRequestBody; /* key fix */
  satNewIOContext->interruptContext = tiInterruptContext;
  satNewIOContext->satIntIoContext  = satIntIo;
  satNewIOContext->ptiDeviceHandle = agNULL;
  satNewIOContext->satOrgIOContext = satIOContext; /* changed */
  /* this is valid only for TD layer generated (not triggered by OS at all) IO */
  satNewIOContext->tiScsiXchg = &(satIntIo->satIntTiScsiXchg);

  TI_DBG6(("tdsaDiscoveryIntStartIDDev: OS satIOContext %p \n", satIOContext));
  TI_DBG6(("tdsaDiscoveryIntStartIDDev: TD satNewIOContext %p \n", satNewIOContext));
  TI_DBG6(("tdsaDiscoveryIntStartIDDev: OS tiScsiXchg %p \n", satIOContext->tiScsiXchg));
  TI_DBG6(("tdsaDiscoveryIntStartIDDev: TD tiScsiXchg %p \n", satNewIOContext->tiScsiXchg));
  TI_DBG3(("tdsaDiscoveryIntStartIDDev: satNewIOContext %p tdIORequestBody %p\n", satNewIOContext, tdIORequestBody));

  status = tdsaDiscoverySendIDDev(tiRoot,
                                  &satIntIo->satIntTiIORequest, /* New tiIORequest */
                                  tiDeviceHandle,
                                  satNewIOContext->tiScsiXchg, /* New tiScsiInitiatorRequest_t *tiScsiRequest, */
                                  satNewIOContext);

  if (status != tiSuccess)
  {
    TI_DBG1(("tdsaDiscoveryIntStartIDDev: failed in sending %d\n", status));
    /* release the internal IO allocated above on failure */
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    return tiError;
  }

  TI_DBG6(("tdsaDiscoveryIntStartIDDev: end\n"));
  return status;
}

/*****************************************************************************/
/*! \brief SAT implementation for tdsaDiscoverySendIDDev.
 *
 *  This function prepares identify device data FIS and sends it to SATA device.
 *
 *  \param   tiRoot:           Pointer to TISA initiator driver/port instance.
 *  \param   tiIORequest:      Pointer to TISA I/O request context for this I/O.
 *  \param   tiDeviceHandle:   Pointer to TISA device handle for this I/O.
 *  \param   tiScsiRequest:    Pointer to TISA SCSI I/O request and SGL list.
 *  \param   satIOContext_t:   Pointer to the SAT IO Context
 *
 *  \return If command is started successfully
 *    - \e tiSuccess:     I/O request successfully initiated.
 *    - \e tiBusy:        No resources available, try again later.
 *    - \e tiIONoDevice:  Invalid device handle.
 *    - \e tiError:       Other errors.
 */
/*****************************************************************************/
GLOBAL bit32 tdsaDiscoverySendIDDev(tiRoot_t                  *tiRoot,
                                    tiIORequest_t             *tiIORequest,
                                    tiDeviceHandle_t          *tiDeviceHandle,
                                    tiScsiInitiatorRequest_t  *tiScsiRequest,
                                    satIOContext_t            *satIOContext
                                    )
{
  bit32                     status;
  bit32                     agRequestType;
  satDeviceData_t           *pSatDevData;
  agsaFisRegHostToDevice_t  *fis;
#ifdef TD_DEBUG_ENABLE
  tdIORequestBody_t         *tdIORequestBody;
  satInternalIo_t           *satIntIoContext;
#endif

  pSatDevData = satIOContext->pSatDevData;
  fis         = satIOContext->pFis;
  TI_DBG3(("tdsaDiscoverySendIDDev: start\n"));
#ifdef TD_DEBUG_ENABLE
  satIntIoContext = satIOContext->satIntIoContext;
  tdIORequestBody = satIntIoContext->satIntRequestBody;
#endif
  TI_DBG5(("tdsaDiscoverySendIDDev: satIOContext %p tdIORequestBody %p\n", satIOContext, tdIORequestBody));

  /*
   * Build a Register Host-to-Device FIS carrying IDENTIFY (PACKET) DEVICE,
   * same construction as satAddSATASendIDDev but completing through
   * tdsaDiscoveryStartIDDevCB.
   */
  fis->h.fisType        = 0x27;                   /* Reg host to device */
  fis->h.c_pmPort       = 0x80;                   /* C Bit is set */
  if (pSatDevData->satDeviceType == SATA_ATAPI_DEVICE)
      fis->h.command    = SAT_IDENTIFY_PACKET_DEVICE;  /* 0xA1 */
  else
      fis->h.command    = SAT_IDENTIFY_DEVICE;    /* 0xEC */
  fis->h.features       = 0;                      /* FIS reserve */
  fis->d.lbaLow         = 0;                      /* FIS LBA (7 :0 ) */
  fis->d.lbaMid         = 0;                      /* FIS LBA (15:8 ) */
  fis->d.lbaHigh        = 0;                      /* FIS LBA (23:16) */
  fis->d.device         = 0;                      /* FIS LBA mode  */
  fis->d.lbaLowExp      = 0;
  fis->d.lbaMidExp      = 0;
  fis->d.lbaHighExp     = 0;
  fis->d.featuresExp    = 0;
  fis->d.sectorCount    = 0;                      /* FIS sector count (7:0) */
  fis->d.sectorCountExp = 0;
  fis->d.reserved4      = 0;
  fis->d.control        = 0;                      /* FIS HOB bit clear */
  fis->d.reserved5      = 0;

  /* IDENTIFY data comes back via PIO data-in */
  agRequestType = AGSA_SATA_PROTOCOL_PIO_READ;

  /* Initialize CB for SATA completion. */
  satIOContext->satCompleteCB = &tdsaDiscoveryStartIDDevCB;

  /*
   * Prepare SGL and send FIS to LL layer.
   */
  satIOContext->reqType = agRequestType;       /* Save it */

#ifdef TD_INTERNAL_DEBUG
  tdhexdump("tdsaDiscoverySendIDDev", (bit8 *)satIOContext->pFis, sizeof(agsaFisRegHostToDevice_t));
#ifdef TD_DEBUG_ENABLE
  tdhexdump("tdsaDiscoverySendIDDev LL", (bit8 *)&(tdIORequestBody->transport.SATA.agSATARequestBody.fis.fisRegHostToDev), sizeof(agsaFisRegHostToDevice_t));
#endif
#endif

  status = sataLLIOStart( tiRoot,
                          tiIORequest,
                          tiDeviceHandle,
                          tiScsiRequest,
                          satIOContext);

  TI_DBG3(("tdsaDiscoverySendIDDev: end status %d\n", status));

  return status;
}

/*****************************************************************************
 *! \brief  tdsaDiscoveryStartIDDevCB
 *
 *   This routine is a callback function for tdsaDiscoverySendIDDev()
 *   Using Identify Device Data, this function finds whether devicedata is
 *   new or old. If new, add it to the devicelist. This is done as a part
 *   of discovery.
 *
 *  \param   agRoot:       Handles for this instance of SAS/SATA hardware
 *  \param   agIORequest:  Pointer to the LL I/O request context for this I/O.
 *  \param   agIOStatus:   Status of completed I/O.
 *  \param   agFirstDword: Pointer to the four bytes of FIS.
 *  \param   agIOInfoLen:  Length in bytes of overrun/underrun residual or FIS
 *                         length.
 *  \param   agParam:      Additional info based on status.
 *  \param   ioContext:    Pointer to satIOContext_t.
*
* \return: none
*
*****************************************************************************/
void tdsaDiscoveryStartIDDevCB(
                               agsaRoot_t        *agRoot,
                               agsaIORequest_t   *agIORequest,
                               bit32             agIOStatus,
                               agsaFisHeader_t   *agFirstDword,
                               bit32             agIOInfoLen,
                               void              *agParam,
                               void              *ioContext
                               )
{
  /* In the process of SAT_IDENTIFY_DEVICE during discovery */
  tdsaRootOsData_t        *osData = (tdsaRootOsData_t *)agRoot->osData;
  tiRoot_t                *tiRoot = (tiRoot_t *)osData->tiRoot;
  tdsaRoot_t              *tdsaRoot = (tdsaRoot_t *) tiRoot->tdData;
  tdsaContext_t           *tdsaAllShared = (tdsaContext_t *)&tdsaRoot->tdsaAllShared;
  tdIORequestBody_t       *tdIORequestBody;
  tdIORequestBody_t       *tdOrgIORequestBody;
  satIOContext_t          *satIOContext;
  satIOContext_t          *satOrgIOContext;
  satIOContext_t          *satNewIOContext;
  satInternalIo_t         *satIntIo;
  satInternalIo_t         *satNewIntIo = agNULL;
  satDeviceData_t         *satDevData;
  tiIORequest_t           *tiOrgIORequest = agNULL;
#ifdef  TD_DEBUG_ENABLE
  bit32                    ataStatus = 0;
  bit32                    ataError;
  agsaFisPioSetupHeader_t *satPIOSetupHeader = agNULL;
#endif
  agsaSATAIdentifyData_t  *pSATAIdData;
  bit16                   *tmpptr, tmpptr_tmp;
  bit32                    x;
  tdsaDeviceData_t        *oneDeviceData = agNULL;
  void                    *sglVirtualAddr;
  tdsaPortContext_t       *onePortContext = agNULL;
  tiPortalContext_t       *tiPortalContext = agNULL;
  bit32                    retry_status;

  TI_DBG3(("tdsaDiscoveryStartIDDevCB: start\n"));

  tdIORequestBody = (tdIORequestBody_t *)agIORequest->osData;
  satIOContext = (satIOContext_t *) ioContext;
  satIntIo = satIOContext->satIntIoContext;
  satDevData = satIOContext->pSatDevData;
  oneDeviceData = (tdsaDeviceData_t *)tdIORequestBody->tiDevHandle->tdData;
  TI_DBG3(("tdsaDiscoveryStartIDDevCB: did %d\n", oneDeviceData->id));
  onePortContext = oneDeviceData->tdPortContext;
  if (onePortContext == agNULL)
  {
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: onePortContext is NULL\n"));
    return;
  }
  tiPortalContext= onePortContext->tiPortalContext;
  /* Assume failure; flipped to agTRUE only on the success path below. */
  satDevData->IDDeviceValid = agFALSE;
  if (satIntIo == agNULL)
  {
    /* Externally (OS) generated IDENTIFY during discovery is not expected;
     * release everything and bail out. */
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: External, OS generated\n"));
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: Not possible case\n"));
    satOrgIOContext = satIOContext;
    tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody;
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    return;
  }
  else
  {
    TI_DBG3(("tdsaDiscoveryStartIDDevCB: Internal, TD generated\n"));
    satOrgIOContext = satIOContext->satOrgIOContext;
    if (satOrgIOContext == agNULL)
    {
      TI_DBG6(("tdsaDiscoveryStartIDDevCB: satOrgIOContext is NULL\n"));
      return;
    }
    else
    {
      TI_DBG6(("tdsaDiscoveryStartIDDevCB: satOrgIOContext is NOT NULL\n"));
      tdOrgIORequestBody = (tdIORequestBody_t *)satOrgIOContext->tiRequestBody;
      /* The IDENTIFY payload lands in the internal IO's SGL buffer. */
      sglVirtualAddr = satIntIo->satIntTiScsiXchg.sglVirtualAddr;
    }
  }
  tiOrgIORequest = tdIORequestBody->tiIORequest;
  tdIORequestBody->ioCompleted = agTRUE;
  tdIORequestBody->ioStarted = agFALSE;
  TI_DBG3(("tdsaDiscoveryStartIDDevCB: satOrgIOContext->pid %d\n", satOrgIOContext->pid));
  /* protect against double completion for old port */
  if (satOrgIOContext->pid != oneDeviceData->tdPortContext->id)
  {
    TI_DBG3(("tdsaDiscoveryStartIDDevCB: incorrect pid\n"));
    TI_DBG3(("tdsaDiscoveryStartIDDevCB: satOrgIOContext->pid %d\n", satOrgIOContext->pid));
    TI_DBG3(("tdsaDiscoveryStartIDDevCB: tiPortalContext pid %d\n", oneDeviceData->tdPortContext->id));
    satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
    satFreeIntIoResource( tiRoot,
                          satDevData,
                          satIntIo);
    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    return;
  }
  /* completion after portcontext is invalidated */
  if (onePortContext != agNULL)
  {
    if (onePortContext->valid == agFALSE)
    {
      TI_DBG1(("tdsaDiscoveryStartIDDevCB: portcontext is invalid\n"));
      TI_DBG1(("tdsaDiscoveryStartIDDevCB: onePortContext->id pid %d\n", onePortContext->id));
      satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
      satFreeIntIoResource( tiRoot,
                            satDevData,
                            satIntIo);
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      /* no notification to OS layer */
      return;
    }
  }
  /* Hard error with no returned FIS: either retry the IDENTIFY (when
   * ResetInDiscovery is enabled and retries remain) or give up, optionally
   * issuing a hard reset once via tdsaPhyControlSend(). */
  if (agFirstDword == agNULL && agIOStatus != OSSA_IO_SUCCESS)
  {
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: agFirstDword is NULL when error, status %d\n", agIOStatus));
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: did %d\n", oneDeviceData->id));
    if (tdsaAllShared->ResetInDiscovery != 0 && satDevData->ID_Retries < SATA_ID_DEVICE_DATA_RETRIES)
    {
      /* Undo the pending-IO accounting before re-issuing the same request. */
      satIOContext->pSatDevData->satPendingNONNCQIO--;
      satIOContext->pSatDevData->satPendingIO--;
      retry_status = sataLLIOStart(tiRoot,
                                   &satIntIo->satIntTiIORequest,
                                   &(oneDeviceData->tiDeviceHandle),
                                   satIOContext->tiScsiXchg,
                                   satIOContext);
      if (retry_status != tiSuccess)
      {
        /* simply give up */
        satDevData->ID_Retries = 0;
        satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
        satFreeIntIoResource( tiRoot,
                              satDevData,
                              satIntIo);
        /* clean up TD layer's IORequestBody */
        ostiFreeMemory(
                       tiRoot,
                       tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                       sizeof(tdIORequestBody_t)
                       );
        return;
      }
      satDevData->ID_Retries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      return;
    }
    else
    {
      satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
      satFreeIntIoResource( tiRoot,
                            satDevData,
                            satIntIo);
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      if (tdsaAllShared->ResetInDiscovery != 0)
      {
        /* ResetInDiscovery in on */
        if (satDevData->NumOfIDRetries <= 0)
        {
          satDevData->NumOfIDRetries++;
          satDevData->ID_Retries = 0;
          /* send link reset */
          tdsaPhyControlSend(tiRoot,
                             oneDeviceData,
                             SMP_PHY_CONTROL_HARD_RESET,
                             agNULL,
                             tdsaRotateQnumber(tiRoot, oneDeviceData)
                             );
        }
      }
      return;
    }
  }
  /* Transport/connection-level errors: same retry-or-reset policy as above. */
  if (agIOStatus == OSSA_IO_ABORTED ||
      agIOStatus == OSSA_IO_UNDERFLOW ||
      agIOStatus == OSSA_IO_XFER_ERROR_BREAK ||
      agIOStatus == OSSA_IO_XFER_ERROR_PHY_NOT_READY ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BREAK ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_BAD_DESTINATION ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_WRONG_DESTINATION ||
      agIOStatus == OSSA_IO_XFER_ERROR_NAK_RECEIVED ||
      agIOStatus == OSSA_IO_XFER_ERROR_DMA ||
      agIOStatus == OSSA_IO_XFER_ERROR_SATA_LINK_TIMEOUT ||
      agIOStatus == OSSA_IO_XFER_ERROR_REJECTED_NCQ_MODE ||
      agIOStatus == OSSA_IO_XFER_OPEN_RETRY_TIMEOUT ||
      agIOStatus == OSSA_IO_NO_DEVICE ||
      agIOStatus == OSSA_IO_OPEN_CNX_ERROR_ZONE_VIOLATION ||
      agIOStatus == OSSA_IO_PORT_IN_RESET ||
      agIOStatus == OSSA_IO_DS_NON_OPERATIONAL ||
      agIOStatus == OSSA_IO_DS_IN_RECOVERY ||
      agIOStatus == OSSA_IO_DS_IN_ERROR
      )
  {
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: OSSA_IO_OPEN_CNX_ERROR 0x%x\n", agIOStatus));
    if (tdsaAllShared->ResetInDiscovery != 0 && satDevData->ID_Retries < SATA_ID_DEVICE_DATA_RETRIES)
    {
      satIOContext->pSatDevData->satPendingNONNCQIO--;
      satIOContext->pSatDevData->satPendingIO--;
      retry_status = sataLLIOStart(tiRoot,
                                   &satIntIo->satIntTiIORequest,
                                   &(oneDeviceData->tiDeviceHandle),
                                   satIOContext->tiScsiXchg,
                                   satIOContext);
      if (retry_status != tiSuccess)
      {
        /* simply give up */
        satDevData->ID_Retries = 0;
        satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
        satFreeIntIoResource( tiRoot,
                              satDevData,
                              satIntIo);
        /* clean up TD layer's IORequestBody */
        ostiFreeMemory(
                       tiRoot,
                       tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                       sizeof(tdIORequestBody_t)
                       );
        return;
      }
      satDevData->ID_Retries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      return;
    }
    else
    {
      satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
      satFreeIntIoResource( tiRoot,
                            satDevData,
                            satIntIo);
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      if (tdsaAllShared->ResetInDiscovery != 0)
      {
        /* ResetInDiscovery in on */
        if (satDevData->NumOfIDRetries <= 0)
        {
          satDevData->NumOfIDRetries++;
          satDevData->ID_Retries = 0;
          /* send link reset */
          tdsaPhyControlSend(tiRoot,
                             oneDeviceData,
                             SMP_PHY_CONTROL_HARD_RESET,
                             agNULL,
                             tdsaRotateQnumber(tiRoot, oneDeviceData)
                             );
        }
      }
      return;
    }
  }
  /* Device-level error (a FIS came back, or residual data is reported). */
  if ( agIOStatus != OSSA_IO_SUCCESS ||
       (agIOStatus == OSSA_IO_SUCCESS && agFirstDword != agNULL && agIOInfoLen != 0)
     )
  {
#ifdef  TD_DEBUG_ENABLE
    /* only agsaFisPioSetup_t is expected */
    satPIOSetupHeader = (agsaFisPioSetupHeader_t *)&(agFirstDword->PioSetup);
    ataStatus = satPIOSetupHeader->status;   /* ATA Status register */
    ataError  = satPIOSetupHeader->error;    /* ATA Error register */
#endif
    TI_DBG1(("tdsaDiscoveryStartIDDevCB: ataStatus 0x%x ataError 0x%x\n", ataStatus, ataError));
    if (tdsaAllShared->ResetInDiscovery != 0 && satDevData->ID_Retries < SATA_ID_DEVICE_DATA_RETRIES)
    {
      satIOContext->pSatDevData->satPendingNONNCQIO--;
      satIOContext->pSatDevData->satPendingIO--;
      retry_status = sataLLIOStart(tiRoot,
                                   &satIntIo->satIntTiIORequest,
                                   &(oneDeviceData->tiDeviceHandle),
                                   satIOContext->tiScsiXchg,
                                   satIOContext);
      if (retry_status != tiSuccess)
      {
        /* simply give up */
        satDevData->ID_Retries = 0;
        satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
        satFreeIntIoResource( tiRoot,
                              satDevData,
                              satIntIo);
        /* clean up TD layer's IORequestBody */
        ostiFreeMemory(
                       tiRoot,
                       tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                       sizeof(tdIORequestBody_t)
                       );
        return;
      }
      satDevData->ID_Retries++;
      tdIORequestBody->ioCompleted = agFALSE;
      tdIORequestBody->ioStarted = agTRUE;
      return;
    }
    else
    {
      satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
      satFreeIntIoResource( tiRoot,
                            satDevData,
                            satIntIo);
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      if (tdsaAllShared->ResetInDiscovery != 0)
      {
        /* ResetInDiscovery in on */
        if (satDevData->NumOfIDRetries <= 0)
        {
          satDevData->NumOfIDRetries++;
          satDevData->ID_Retries = 0;
          /* send link reset */
          tdsaPhyControlSend(tiRoot,
                             oneDeviceData,
                             SMP_PHY_CONTROL_HARD_RESET,
                             agNULL,
                             tdsaRotateQnumber(tiRoot, oneDeviceData)
                             );
        }
      }
      return;
    }
  }
  /* success */
  TI_DBG3(("tdsaDiscoveryStartIDDevCB: Success\n"));
  TI_DBG3(("tdsaDiscoveryStartIDDevCB: Success did %d\n", oneDeviceData->id));
  /* Convert to host endian */
  tmpptr = (bit16*)sglVirtualAddr;
  for (x=0; x < sizeof(agsaSATAIdentifyData_t)/sizeof(bit16); x++)
  {
    OSSA_READ_LE_16(AGROOT, &tmpptr_tmp, tmpptr, 0);
    *tmpptr = tmpptr_tmp;
    tmpptr++;
  }
  pSATAIdData = (agsaSATAIdentifyData_t *)sglVirtualAddr;
  //tdhexdump("satAddSATAIDDevCB before", (bit8 *)pSATAIdData, sizeof(agsaSATAIdentifyData_t));
  TI_DBG5(("tdsaDiscoveryStartIDDevCB: OS satOrgIOContext %p \n", satOrgIOContext));
  TI_DBG5(("tdsaDiscoveryStartIDDevCB: TD satIOContext %p \n", satIOContext));
  TI_DBG5(("tdsaDiscoveryStartIDDevCB: OS tiScsiXchg %p \n", satOrgIOContext->tiScsiXchg));
  TI_DBG5(("tdsaDiscoveryStartIDDevCB: TD tiScsiXchg %p \n", satIOContext->tiScsiXchg));
  /* copy ID Dev data to satDevData */
  satDevData->satIdentifyData = *pSATAIdData;
  satDevData->IDDeviceValid = agTRUE;
#ifdef TD_INTERNAL_DEBUG
  tdhexdump("tdsaDiscoveryStartIDDevCB ID Dev data",(bit8 *)pSATAIdData, sizeof(agsaSATAIdentifyData_t));
  tdhexdump("tdsaDiscoveryStartIDDevCB Device ID Dev data",(bit8 *)&satDevData->satIdentifyData, sizeof(agsaSATAIdentifyData_t));
#endif
  /* set satDevData fields from IndentifyData */
  satSetDevInfo(satDevData,pSATAIdData);
  satDecrementPendingIO(tiRoot, tdsaAllShared, satIOContext);
  satFreeIntIoResource( tiRoot,
                        satDevData,
                        satIntIo);
  if (satDevData->satDeviceType == SATA_ATAPI_DEVICE)
  {
    /* send the Set Feature ATA command to ATAPI device for enbling PIO and DMA transfer mode*/
    satNewIntIo = satAllocIntIoResource( tiRoot,
                                         tiOrgIORequest,
                                         satDevData,
                                         0,
                                         satNewIntIo);
    if (satNewIntIo == agNULL)
    {
      TI_DBG1(("tdsaDiscoveryStartIDDevCB: momory allocation fails\n"));
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
      return;
    } /* end memory allocation */
    satNewIOContext = satPrepareNewIO(satNewIntIo,
                                      tiOrgIORequest,
                                      satDevData,
                                      agNULL,
                                      satOrgIOContext
                                      );
    /* enable PIO mode, then enable Ultra DMA mode in the satSetFeaturesCB callback function*/
    retry_status = satSetFeatures(tiRoot,
                                  &satNewIntIo->satIntTiIORequest,
                                  satNewIOContext->ptiDeviceHandle,
                                  &satNewIntIo->satIntTiScsiXchg, /* orginal from OS layer */
                                  satNewIOContext,
                                  agFALSE);
    if (retry_status != tiSuccess)
    {
      /* NOTE(review): this frees satIntIo, which was already released by
       * satFreeIntIoResource() above; satNewIntIo looks like the intended
       * argument here — confirm against satFreeIntIoResource semantics. */
      satFreeIntIoResource(tiRoot, satDevData, satIntIo);
      /* clean up TD layer's IORequestBody */
      ostiFreeMemory(
                     tiRoot,
                     tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                     sizeof(tdIORequestBody_t)
                     );
    }
  }
  else
  {
    /* clean up TD layer's IORequestBody */
    ostiFreeMemory(
                   tiRoot,
                   tdOrgIORequestBody->IOType.InitiatorTMIO.osMemHandle,
                   sizeof(tdIORequestBody_t)
                   );
    if (onePortContext != agNULL)
    {
      if (onePortContext->DiscoveryState == ITD_DSTATE_COMPLETED)
      {
        TI_DBG1(("tdsaDiscoveryStartIDDevCB: ID completed after discovery is done; tiDeviceArrival\n"));
        /* in case registration is finished after discovery is finished */
        ostiInitiatorEvent(
                           tiRoot,
                           tiPortalContext,
                           agNULL,
                           tiIntrEventTypeDeviceChange,
                           tiDeviceArrival,
                           agNULL
                           );
      }
    }
    else
    {
      TI_DBG1(("tdsaDiscoveryStartIDDevCB: onePortContext is NULL, wrong\n"));
    }
  }
  TI_DBG3(("tdsaDiscoveryStartIDDevCB: end\n"));
  return;
}
/*****************************************************************************
*! \brief  satAbort
*
*   This routine does local abort for outstanding FIS.
*
*  \param   agRoot:         Handles for this instance of SAS/SATA hardware
*  \param   satIOContext:   Pointer to satIOContext_t.
* * \return: none * *****************************************************************************/ GLOBAL void satAbort(agsaRoot_t *agRoot, satIOContext_t *satIOContext) { tdsaRootOsData_t *osData = (tdsaRootOsData_t *)agRoot->osData; tiRoot_t *tiRoot = (tiRoot_t *)osData->tiRoot; tdIORequestBody_t *tdIORequestBody; /* io to be aborted */ tdIORequestBody_t *tdAbortIORequestBody; /* abort io itself */ agsaIORequest_t *agToBeAbortedIORequest; /* io to be aborted */ agsaIORequest_t *agAbortIORequest; /* abort io itself */ bit32 PhysUpper32; bit32 PhysLower32; bit32 memAllocStatus; void *osMemHandle; TI_DBG1(("satAbort: start\n")); if (satIOContext == agNULL) { TI_DBG1(("satAbort: satIOContext is NULL, wrong\n")); return; } tdIORequestBody = (tdIORequestBody_t *)satIOContext->tiRequestBody; agToBeAbortedIORequest = (agsaIORequest_t *)&(tdIORequestBody->agIORequest); /* allocating agIORequest for abort itself */ memAllocStatus = ostiAllocMemory( tiRoot, &osMemHandle, (void **)&tdAbortIORequestBody, &PhysUpper32, &PhysLower32, 8, sizeof(tdIORequestBody_t), agTRUE ); if (memAllocStatus != tiSuccess) { /* let os process IO */ TI_DBG1(("satAbort: ostiAllocMemory failed...\n")); return; } if (tdAbortIORequestBody == agNULL) { /* let os process IO */ TI_DBG1(("satAbort: ostiAllocMemory returned NULL tdAbortIORequestBody\n")); return; } /* setup task management structure */ tdAbortIORequestBody->IOType.InitiatorTMIO.osMemHandle = osMemHandle; tdAbortIORequestBody->tiDevHandle = tdIORequestBody->tiDevHandle; /* initialize agIORequest */ agAbortIORequest = &(tdAbortIORequestBody->agIORequest); agAbortIORequest->osData = (void *) tdAbortIORequestBody; agAbortIORequest->sdkData = agNULL; /* LL takes care of this */ /* * Issue abort */ saSATAAbort( agRoot, agAbortIORequest, 0, agNULL, 0, agToBeAbortedIORequest, agNULL ); TI_DBG1(("satAbort: end\n")); return; } /***************************************************************************** *! 
\brief satSATADeviceReset * * This routine is called to reset all phys of port which a device belongs to * * \param tiRoot: Pointer to TISA initiator driver/port instance. * \param oneDeviceData: Pointer to the device data. * \param flag: reset flag * * \return: * * none * *****************************************************************************/ osGLOBAL void satSATADeviceReset( tiRoot_t *tiRoot, tdsaDeviceData_t *oneDeviceData, bit32 flag) { agsaRoot_t *agRoot; tdsaPortContext_t *onePortContext; bit32 i; TI_DBG1(("satSATADeviceReset: start\n")); agRoot = oneDeviceData->agRoot; onePortContext = oneDeviceData->tdPortContext; if (agRoot == agNULL) { TI_DBG1(("satSATADeviceReset: Error!!! agRoot is NULL\n")); return; } if (onePortContext == agNULL) { TI_DBG1(("satSATADeviceReset: Error!!! onePortContext is NULL\n")); return; } for(i=0;iPhyIDList[i] == agTRUE) { saLocalPhyControl(agRoot, agNULL, tdsaRotateQnumber(tiRoot, agNULL), i, flag, agNULL); } } return; } #endif /* #ifdef SATA_ENABLE */ Index: head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c =================================================================== --- head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c (revision 359440) +++ head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c (revision 359441) @@ -1,6635 +1,6635 @@ /******************************************************************************* ** *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * *INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE *ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS *OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, *WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF *THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ** *******************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #define MAJOR_REVISION 1 #define MINOR_REVISION 3 #define BUILD_REVISION 10800 #include // defines used in kernel.h #include #include #include #include #include // types used in module initialization #include // cdevsw struct #include // uio struct #include #include #include // structs, prototypes for pci bus stuff #include #include #include #include // 1. for vtophys #include // 2. 
for vtophys #include // For pci_get macros #include #include #include #include #include #include #include #include #include #include #include #include #include // #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE( M_PMC_MCCB, "CCB List", "CCB List for PMCS driver" ); MALLOC_DEFINE( M_PMC_MSTL, "STLock malloc", "allocated in agtiapi_attach as memory for lock use" ); MALLOC_DEFINE( M_PMC_MDVT, "ag_device_t malloc", "allocated in agtiapi_attach as mem for ag_device_t pDevList" ); MALLOC_DEFINE( M_PMC_MPRT, "ag_portal_data_t malloc", "allocated in agtiapi_attach as mem for *pPortalData" ); MALLOC_DEFINE( M_PMC_MDEV, "tiDeviceHandle_t * malloc", "allocated in agtiapi_GetDevHandle as local mem for **agDev" ); MALLOC_DEFINE( M_PMC_MFLG, "lDevFlags * malloc", "allocated in agtiapi_GetDevHandle as local mem for * flags" ); #ifdef LINUX_PERBI_SUPPORT MALLOC_DEFINE( M_PMC_MSLR, "ag_slr_map_t malloc", "mem allocated in agtiapi_attach for pSLRList" ); MALLOC_DEFINE( M_PMC_MTGT, "ag_tgt_map_t malloc", "mem allocated in agtiapi_attach for pWWNList" ); #endif MALLOC_DEFINE(TEMP,"tempbuff","buffer for payload"); MALLOC_DEFINE(TEMP2, "tempbuff", "buffer for agtiapi_getdevlist"); STATIC U32 agtiapi_intx_mode = 0; STATIC U08 ag_Perbi = 0; STATIC U32 agtiapi_polling_mode = 0; STATIC U32 ag_card_good = 0; // * total card initialized STATIC U32 ag_option_flag = 0; // * adjustable parameter flag STATIC U32 agtiapi_1st_time = 1; STATIC U32 ag_timeout_secs = 10; //Made timeout equivalent to linux U32 gTiDebugLevel = 1; S32 ag_encryption_enable = 0; atomic_t outstanding_encrypted_io_count; #define cache_line_size() CACHE_LINE_SIZE #define PMCoffsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #define CPU_TO_LE32(dst, src) \ dst.lower = htole32(LOW_32_BITS(src)); \ dst.upper = htole32(HIGH_32_BITS(src)) #define CMND_TO_CHANNEL( ccb ) ( ccb->ccb_h.path_id ) #define CMND_TO_TARGET( ccb ) ( ccb->ccb_h.target_id ) #define 
CMND_TO_LUN( ccb ) ( ccb->ccb_h.target_lun ) STATIC U08 agtiapi_AddrModes[AGTIAPI_MAX_CHANNEL_NUM + 1] = { AGTIAPI_PERIPHERAL }; #ifdef LINUX_PERBI_SUPPORT // Holding area for target-WWN mapping assignments on the boot line static ag_mapping_t *agMappingList = NULL; // modified by agtiapi_Setup() #endif // * For Debugging Purpose #ifdef AGTIAPI_DEBUG #define AGTIAPI_WWN(name, len) wwnprintk(name, len) #else #define AGTIAPI_WWN(name, len) #endif #define AGTIAPI_WWNPRINTK(name, len, format, a...) \ AGTIAPI_PRINTK(format "name ", a); \ AGTIAPI_WWN((unsigned char*)name, len); #define AGTIAPI_ERR_WWNPRINTK(name, len, format, a...) \ printk(KERN_DEBUG format "name ", ## a); \ wwnprintk((unsigned char*)name, len); #define AGTIAPI_CPY_DEV_INFO(root, dev, pDev) \ tiINIGetDeviceInfo(root, dev, &pDev->devInfo); \ wwncpy(pDev); #ifdef AGTIAPI_LOCAL_LOCK #define AG_CARD_LOCAL_LOCK(lock) ,(lock) #define AG_SPIN_LOCK_IRQ(lock, flags) #define AG_SPIN_UNLOCK_IRQ(lock, flags) #define AG_SPIN_LOCK(lock) #define AG_SPIN_UNLOCK(lock) #define AG_GLOBAL_ARG(arg) #define AG_PERF_SPINLOCK(lock) #define AG_PERF_SPINLOCK_IRQ(lock, flags) #define AG_LOCAL_LOCK(lock) if (lock) \ mtx_lock(lock) #define AG_LOCAL_UNLOCK(lock) if (lock) \ mtx_unlock(lock) #define AG_LOCAL_FLAGS(_flags) unsigned long _flags = 0 #endif #define AG_GET_DONE_PCCB(pccb, pmcsc) \ { \ AG_LOCAL_LOCK(&pmcsc->doneLock); \ pccb = pmcsc->ccbDoneHead; \ if (pccb != NULL) \ { \ pmcsc->ccbDoneHead = NULL; \ pmcsc->ccbDoneTail = NULL; \ AG_LOCAL_UNLOCK(&pmcsc->doneLock); \ agtiapi_Done(pmcsc, pccb); \ } \ else \ AG_LOCAL_UNLOCK(&pmcsc->doneLock); \ } #define AG_GET_DONE_SMP_PCCB(pccb, pmcsc) \ { \ AG_LOCAL_LOCK(&pmcsc->doneSMPLock); \ pccb = pmcsc->smpDoneHead; \ if (pccb != NULL) \ { \ pmcsc->smpDoneHead = NULL; \ pmcsc->smpDoneTail = NULL; \ AG_LOCAL_UNLOCK(&pmcsc->doneSMPLock); \ agtiapi_SMPDone(pmcsc, pccb); \ } \ else \ AG_LOCAL_UNLOCK(&pmcsc->doneSMPLock); \ } #ifdef AGTIAPI_DUMP_IO_DEBUG #define AG_IO_DUMPCCB(pccb) 
agtiapi_DumpCCB(pccb) #else #define AG_IO_DUMPCCB(pccb) #endif #define SCHED_DELAY_JIFFIES 4 /* in seconds */ #ifdef HOTPLUG_SUPPORT #define AG_HOTPLUG_LOCK_INIT(lock) mxt_init(lock) #define AG_LIST_LOCK(lock) mtx_lock(lock) #define AG_LIST_UNLOCK(lock) mtx_unlock(lock) #else #define AG_HOTPLUG_LOCK_INIT(lock) #define AG_LIST_LOCK(lock) #define AG_LIST_UNLOCK(lock) #endif STATIC void agtiapi_CheckIOTimeout(void *data); static ag_card_info_t agCardInfoList[ AGTIAPI_MAX_CARDS ]; // card info list static void agtiapi_cam_action( struct cam_sim *, union ccb * ); static void agtiapi_cam_poll( struct cam_sim * ); // Function prototypes static d_open_t agtiapi_open; static d_close_t agtiapi_close; static d_read_t agtiapi_read; static d_write_t agtiapi_write; static d_ioctl_t agtiapi_CharIoctl; static void agtiapi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); void agtiapi_adjust_queue_depth(struct cam_path *path, bit32 QueueDepth); // Character device entry points static struct cdevsw agtiapi_cdevsw = { .d_version = D_VERSION, .d_open = agtiapi_open, .d_close = agtiapi_close, .d_read = agtiapi_read, .d_write = agtiapi_write, .d_ioctl = agtiapi_CharIoctl, .d_name = "pmspcv", }; U32 maxTargets = 0; U32 ag_portal_count = 0; // In the cdevsw routines, we find our softc by using the si_drv1 member // of struct cdev. We set this variable to point to our softc in our // attach routine when we create the /dev entry. int agtiapi_open( struct cdev *dev, int oflags, int devtype, struct thread *td ) { struct agtiapi_softc *sc; /* Look up our softc. */ sc = dev->si_drv1; AGTIAPI_PRINTK("agtiapi_open\n"); AGTIAPI_PRINTK("Opened successfully. sc->my_dev %p\n", sc->my_dev); return( 0 ); } int agtiapi_close( struct cdev *dev, int fflag, int devtype, struct thread *td ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK("agtiapi_close\n"); AGTIAPI_PRINTK("Closed. 
sc->my_dev %p\n", sc->my_dev); return( 0 ); } int agtiapi_read( struct cdev *dev, struct uio *uio, int ioflag ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK( "agtiapi_read\n" ); AGTIAPI_PRINTK( "Asked to read %lu bytes. sc->my_dev %p\n", uio->uio_resid, sc->my_dev ); return( 0 ); } int agtiapi_write( struct cdev *dev, struct uio *uio, int ioflag ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK( "agtiapi_write\n" ); AGTIAPI_PRINTK( "Asked to write %lu bytes. sc->my_dev %p\n", uio->uio_resid, sc->my_dev ); return( 0 ); } int agtiapi_getdevlist( struct agtiapi_softc *pCard, tiIOCTLPayload_t *agIOCTLPayload ) { tdDeviceListPayload_t *pIoctlPayload = (tdDeviceListPayload_t *) agIOCTLPayload->FunctionSpecificArea; tdDeviceInfoIOCTL_t *pDeviceInfo = NULL; bit8 *pDeviceInfoOrg; tdsaDeviceData_t *pDeviceData = NULL; tiDeviceHandle_t **devList = NULL; tiDeviceHandle_t **devHandleArray = NULL; tiDeviceHandle_t *pDeviceHandle = NULL; bit32 x, memNeeded1; bit32 count, total; bit32 MaxDeviceCount; bit32 ret_val=IOCTL_CALL_INVALID_CODE; ag_portal_data_t *pPortalData; bit8 *pDeviceHandleList = NULL; AGTIAPI_PRINTK( "agtiapi_getdevlist: Enter\n" ); pDeviceInfoOrg = pIoctlPayload -> pDeviceInfo; MaxDeviceCount = pCard->devDiscover; if (MaxDeviceCount > pIoctlPayload->deviceLength ) { AGTIAPI_PRINTK( "agtiapi_getdevlist: MaxDeviceCount: %d > Requested device length: %d\n", MaxDeviceCount, pIoctlPayload->deviceLength ); MaxDeviceCount = pIoctlPayload->deviceLength; ret_val = IOCTL_CALL_FAIL; } AGTIAPI_PRINTK( "agtiapi_getdevlist: MaxDeviceCount: %d > Requested device length: %d\n", MaxDeviceCount, pIoctlPayload->deviceLength ); memNeeded1 = AG_ALIGNSIZE( MaxDeviceCount * sizeof(tiDeviceHandle_t *), sizeof(void *) ); AGTIAPI_PRINTK("agtiapi_getdevlist: portCount %d\n", pCard->portCount); devList = malloc(memNeeded1, TEMP2, M_WAITOK); if (devList == NULL) { AGTIAPI_PRINTK("agtiapi_getdevlist: failed to allocate 
memory\n"); ret_val = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return ret_val; } osti_memset(devList, 0, memNeeded1); pPortalData = &pCard->pPortalData[0]; pDeviceHandleList = (bit8*)devList; for (total = x = 0; x < pCard->portCount; x++, pPortalData++) { count = tiINIGetDeviceHandlesForWinIOCTL(&pCard->tiRoot, &pPortalData->portalInfo.tiPortalContext, ( tiDeviceHandle_t **)pDeviceHandleList ,MaxDeviceCount ); if (count == DISCOVERY_IN_PROGRESS) { AGTIAPI_PRINTK( "agtiapi_getdevlist: DISCOVERY_IN_PROGRESS on " "portal %d\n", x ); free(devList, TEMP2); ret_val = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return ret_val; } total += count; pDeviceHandleList+= count*sizeof(tiDeviceHandle_t *); MaxDeviceCount-= count; } if (total > pIoctlPayload->deviceLength) { total = pIoctlPayload->deviceLength; } // dump device information from device handle list count = 0; devHandleArray = devList; for (x = 0; x < pCard->devDiscover; x++) { pDeviceHandle = (tiDeviceHandle_t*)devHandleArray[x]; if (devList[x] != agNULL) { pDeviceData = devList [x]->tdData; pDeviceInfo = (tdDeviceInfoIOCTL_t*)(pDeviceInfoOrg + sizeof(tdDeviceInfoIOCTL_t) * count); if (pDeviceData != agNULL && pDeviceInfo != agNULL) { osti_memcpy( &pDeviceInfo->sasAddressHi, pDeviceData->agDeviceInfo.sasAddressHi, sizeof(bit32) ); osti_memcpy( &pDeviceInfo->sasAddressLo, pDeviceData->agDeviceInfo.sasAddressLo, sizeof(bit32) ); #if 0 pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressHi ); pDeviceInfo->sasAddressLo = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressLo ); #endif pDeviceInfo->deviceType = ( pDeviceData->agDeviceInfo.devType_S_Rate & 0x30 ) >> 4; pDeviceInfo->linkRate = pDeviceData->agDeviceInfo.devType_S_Rate & 0x0F; pDeviceInfo->phyId = pDeviceData->phyID; pDeviceInfo->ishost = pDeviceData->target_ssp_stp_smp; pDeviceInfo->DeviceHandle= (unsigned long)pDeviceHandle; if(pDeviceInfo->deviceType == 0x02) { bit8 
*sasAddressHi; bit8 *sasAddressLo; tiIniGetDirectSataSasAddr(&pCard->tiRoot, pDeviceData->phyID, &sasAddressHi, &sasAddressLo); pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32(*(bit32*)sasAddressHi); pDeviceInfo->sasAddressLo = DMA_BEBIT32_TO_BIT32(*(bit32*)sasAddressLo) + pDeviceData->phyID + 16; } else { pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressHi ); pDeviceInfo->sasAddressLo = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressLo ); } AGTIAPI_PRINTK( "agtiapi_getdevlist: devicetype %x\n", pDeviceInfo->deviceType ); AGTIAPI_PRINTK( "agtiapi_getdevlist: linkrate %x\n", pDeviceInfo->linkRate ); AGTIAPI_PRINTK( "agtiapi_getdevlist: phyID %x\n", pDeviceInfo->phyId ); AGTIAPI_PRINTK( "agtiapi_getdevlist: addresshi %x\n", pDeviceInfo->sasAddressHi ); AGTIAPI_PRINTK( "agtiapi_getdevlist: addresslo %x\n", pDeviceInfo->sasAddressHi ); } else { AGTIAPI_PRINTK( "agtiapi_getdevlist: pDeviceData %p or pDeviceInfo " "%p is NULL %d\n", pDeviceData, pDeviceInfo, x ); } count++; } } pIoctlPayload->realDeviceCount = count; AGTIAPI_PRINTK( "agtiapi_getdevlist: Exit RealDeviceCount = %d\n", count ); if (devList) { free(devList, TEMP2); } if(ret_val != IOCTL_CALL_FAIL) { ret_val = IOCTL_CALL_SUCCESS; } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; return ret_val; } /****************************************************************************** agtiapi_getCardInfo() Purpose: This function retrives the Card information Parameters: Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ int agtiapi_getCardInfo ( struct agtiapi_softc *pCard, U32_64 size, void *buffer ) { CardInfo_t *pCardInfo; pCardInfo = (CardInfo_t *)buffer; pCardInfo->deviceId = pci_get_device(pCard->my_dev); pCardInfo->vendorId =pci_get_vendor(pCard->my_dev) ; memcpy( pCardInfo->pciMemBaseSpc, pCard->pCardInfo->pciMemBaseSpc, ((sizeof(U32_64))*PCI_NUMBER_BARS) ); pCardInfo->deviceNum = 
pci_get_slot(pCard->my_dev); pCardInfo->pciMemBase = pCard->pCardInfo->pciMemBase; pCardInfo->pciIOAddrLow = pCard->pCardInfo->pciIOAddrLow; pCardInfo->pciIOAddrUp = pCard->pCardInfo->pciIOAddrUp; pCardInfo->busNum =pci_get_bus(pCard->my_dev); return 0; } void agtiapi_adjust_queue_depth(struct cam_path *path, bit32 QueueDepth) { struct ccb_relsim crs; xpt_setup_ccb(&crs.ccb_h, path, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = QueueDepth; xpt_action((union ccb *)&crs); if(crs.ccb_h.status != CAM_REQ_CMP) { printf("XPT_REL_SIMQ failed\n"); } } static void agtiapi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct agtiapi_softc *pmsc; U32 TID; ag_device_t *targ; pmsc = (struct agtiapi_softc*)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } TID = cgd->ccb_h.target_id; if (TID >= 0 && TID < maxTargets){ if (pmsc != NULL){ TID = INDEX(pmsc, TID); targ = &pmsc->pDevList[TID]; agtiapi_adjust_queue_depth(path, targ->qdepth); } } break; } default: break; } } /****************************************************************************** agtiapi_CharIoctl() Purpose: This function handles the ioctl from application layer Parameters: Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_CharIoctl( struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td ) { struct sema mx; datatosend *load; // structure defined in lxcommon.h tiIOCTLPayload_t *pIoctlPayload; struct agtiapi_softc *pCard; pCard=dev->si_drv1; U32 status = 0; U32 retValue; int err = 0; int error = 0; tdDeviceListPayload_t *pDeviceList = NULL; unsigned long flags; switch (cmd) { case AGTIAPI_IOCTL: load=(datatosend*)data; pIoctlPayload = malloc(load->datasize,TEMP,M_WAITOK); AGTIAPI_PRINTK( 
"agtiapi_CharIoctl: old load->datasize = %d\n", load->datasize ); //Copy payload to kernel buffer, on success it returns 0 err = copyin(load->data,pIoctlPayload,load->datasize); if (err) { status = IOCTL_CALL_FAIL; return status; } sema_init(&mx,0,"sem"); pCard->pIoctlSem =&mx; pCard->up_count = pCard->down_count = 0; if ( pIoctlPayload->MajorFunction == IOCTL_MJ_GET_DEVICE_LIST ) { retValue = agtiapi_getdevlist(pCard, pIoctlPayload); if (retValue == 0) { pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } //update new device length pDeviceList = (tdDeviceListPayload_t*)pIoctlPayload->FunctionSpecificArea; load->datasize =load->datasize - sizeof(tdDeviceInfoIOCTL_t) * (pDeviceList->deviceLength - pDeviceList->realDeviceCount); AGTIAPI_PRINTK( "agtiapi_CharIoctl: new load->datasize = %d\n", load->datasize ); } else if (pIoctlPayload->MajorFunction == IOCTL_MN_GET_CARD_INFO) { retValue = agtiapi_getCardInfo( pCard, pIoctlPayload->Length, (pIoctlPayload->FunctionSpecificArea) ); if (retValue == 0) { pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } } else if ( pIoctlPayload->MajorFunction == IOCTL_MJ_CHECK_DPMC_EVENT ) { if ( pCard->flags & AGTIAPI_PORT_PANIC ) { strcpy ( pIoctlPayload->FunctionSpecificArea, "DPMC LEAN\n" ); } else { strcpy ( pIoctlPayload->FunctionSpecificArea, "do not dpmc lean\n" ); } pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else if (pIoctlPayload->MajorFunction == IOCTL_MJ_CHECK_FATAL_ERROR ) { AGTIAPI_PRINTK("agtiapi_CharIoctl: IOCTL_MJ_CHECK_FATAL_ERROR call received for card %d\n", pCard->cardNo); //read port status to see if there is a fatal event if(pCard->flags & AGTIAPI_PORT_PANIC) { printf("agtiapi_CharIoctl: Port Panic Status For Card %d is True\n",pCard->cardNo); pIoctlPayload->Status = 
IOCTL_MJ_FATAL_ERR_CHK_SEND_TRUE; } else { AGTIAPI_PRINTK("agtiapi_CharIoctl: Port Panic Status For Card %d is False\n",pCard->cardNo); pIoctlPayload->Status = IOCTL_MJ_FATAL_ERR_CHK_SEND_FALSE; } status = IOCTL_CALL_SUCCESS; } else if (pIoctlPayload->MajorFunction == IOCTL_MJ_FATAL_ERROR_DUMP_COMPLETE) { AGTIAPI_PRINTK("agtiapi_CharIoctl: IOCTL_MJ_FATAL_ERROR_DUMP_COMPLETE call received for card %d\n", pCard->cardNo); //set flags bit status to be a soft reset pCard->flags |= AGTIAPI_SOFT_RESET; //trigger soft reset for the card retValue = agtiapi_ResetCard (pCard, &flags); if(retValue == AGTIAPI_SUCCESS) { //clear port panic status pCard->flags &= ~AGTIAPI_PORT_PANIC; pIoctlPayload->Status = IOCTL_MJ_FATAL_ERROR_SOFT_RESET_TRIG; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } } else { status = tiCOMMgntIOCTL( &pCard->tiRoot, pIoctlPayload, pCard, NULL, NULL ); if (status == IOCTL_CALL_PENDING) { ostiIOCTLWaitForSignal(&pCard->tiRoot,NULL, NULL, NULL); status = IOCTL_CALL_SUCCESS; } } pCard->pIoctlSem = NULL; err = 0; //copy kernel buffer to userland buffer err=copyout(pIoctlPayload,load->data,load->datasize); if (err) { status = IOCTL_CALL_FAIL; return status; } free(pIoctlPayload,TEMP); pIoctlPayload=NULL; break; default: error = ENOTTY; break; } return(status); } /****************************************************************************** agtiapi_probe() Purpose: This function initialize and registere all detected HBAs. 
The first function being called in driver after agtiapi_probe() Parameters: device_t dev (IN) - device pointer Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_probe( device_t dev ) { int retVal; int thisCard; ag_card_info_t *thisCardInst; thisCard = device_get_unit( dev ); if ( thisCard >= AGTIAPI_MAX_CARDS ) { device_printf( dev, "Too many PMC-Sierra cards detected ERROR!\n" ); return (ENXIO); // maybe change to different return value? } thisCardInst = &agCardInfoList[ thisCard ]; retVal = agtiapi_ProbeCard( dev, thisCardInst, thisCard ); if ( retVal ) return (ENXIO); // maybe change to different return value? return( BUS_PROBE_DEFAULT ); // successful probe } /****************************************************************************** agtiapi_attach() Purpose: This function initialize and registere all detected HBAs. The first function being called in driver after agtiapi_probe() Parameters: device_t dev (IN) - device pointer Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_attach( device_t devx ) { // keeping get_unit call to once int thisCard = device_get_unit( devx ); struct agtiapi_softc *pmsc; ag_card_info_t *thisCardInst = &agCardInfoList[ thisCard ]; ag_resource_info_t *pRscInfo; int idx; int lenRecv; char buffer [256], *pLastUsedChar; union ccb *ccb; int bus, tid, lun; struct ccb_setasync csa; AGTIAPI_PRINTK("agtiapi_attach: start dev %p thisCard %d\n", devx, thisCard); // AGTIAPI_PRINTK( "agtiapi_attach: entry pointer values A %p / %p\n", // thisCardInst->pPCIDev, thisCardInst ); AGTIAPI_PRINTK( "agtiapi_attach: deviceID: 0x%x\n", pci_get_devid( devx ) ); TUNABLE_INT_FETCH( "DPMC_TIMEOUT_SECS", &ag_timeout_secs ); TUNABLE_INT_FETCH( "DPMC_TIDEBUG_LEVEL", &gTiDebugLevel ); // printf( "agtiapi_attach: debugLevel %d, timeout %d\n", // 
gTiDebugLevel, ag_timeout_secs ); if ( ag_timeout_secs < 1 ) { ag_timeout_secs = 1; // set minimum timeout value of 1 second } ag_timeout_secs = (ag_timeout_secs * 1000); // convert to millisecond notation // Look up our softc and initialize its fields. pmsc = device_get_softc( devx ); pmsc->my_dev = devx; /* Get NumberOfPortals */ if ((ostiGetTransportParam( &pmsc->tiRoot, "Global", "CardDefault", agNULL, agNULL, agNULL, agNULL, "NumberOfPortals", buffer, 255, &lenRecv ) == tiSuccess) && (lenRecv != 0)) { if (osti_strncmp(buffer, "0x", 2) == 0) { ag_portal_count = osti_strtoul (buffer, &pLastUsedChar, 0); } else { ag_portal_count = osti_strtoul (buffer, &pLastUsedChar, 10); } if (ag_portal_count > AGTIAPI_MAX_PORTALS) ag_portal_count = AGTIAPI_MAX_PORTALS; } else { ag_portal_count = AGTIAPI_MAX_PORTALS; } AGTIAPI_PRINTK( "agtiapi_attach: ag_portal_count=%d\n", ag_portal_count ); // initialize hostdata structure pmsc->flags |= AGTIAPI_INIT_TIME | AGTIAPI_SCSI_REGISTERED | AGTIAPI_INITIATOR; pmsc->cardNo = thisCard; pmsc->ccbTotal = 0; pmsc->portCount = ag_portal_count; pmsc->pCardInfo = thisCardInst; pmsc->tiRoot.osData = pmsc; pmsc->pCardInfo->pCard = (void *)pmsc; pmsc->VidDid = ( pci_get_vendor(devx) << 16 ) | pci_get_device( devx ); pmsc->SimQFrozen = agFALSE; pmsc->devq_flag = agFALSE; pRscInfo = &thisCardInst->tiRscInfo; osti_memset(buffer, 0, 256); lenRecv = 0; /* Get MaxTargets */ if ((ostiGetTransportParam( &pmsc->tiRoot, "Global", "InitiatorParms", agNULL, agNULL, agNULL, agNULL, "MaxTargets", buffer, sizeof(buffer), &lenRecv ) == tiSuccess) && (lenRecv != 0)) { if (osti_strncmp(buffer, "0x", 2) == 0) { maxTargets = osti_strtoul (buffer, &pLastUsedChar, 0); AGTIAPI_PRINTK( "agtiapi_attach: maxTargets = osti_strtoul 0 \n" ); } else { maxTargets = osti_strtoul (buffer, &pLastUsedChar, 10); AGTIAPI_PRINTK( "agtiapi_attach: maxTargets = osti_strtoul 10\n" ); } } else { if(Is_ADP8H(pmsc)) maxTargets = AGTIAPI_MAX_DEVICE_8H; else if(Is_ADP7H(pmsc)) maxTargets = 
AGTIAPI_MAX_DEVICE_7H; else maxTargets = AGTIAPI_MAX_DEVICE; } if (maxTargets > AGTIAPI_HW_LIMIT_DEVICE) { AGTIAPI_PRINTK( "agtiapi_attach: maxTargets: %d > AGTIAPI_HW_LIMIT_DEVICE: %d\n", maxTargets, AGTIAPI_HW_LIMIT_DEVICE ); AGTIAPI_PRINTK( "agtiapi_attach: change maxTargets = AGTIAPI_HW_LIMIT_DEVICE\n" ); maxTargets = AGTIAPI_HW_LIMIT_DEVICE; } pmsc->devDiscover = maxTargets ; #ifdef HIALEAH_ENCRYPTION ag_encryption_enable = 1; if(ag_encryption_enable && pci_get_device(pmsc->pCardInfo->pPCIDev) == PCI_DEVICE_ID_HIALEAH_HBA_SPCVE) { pmsc->encrypt = 1; pRscInfo->tiLoLevelResource.loLevelOption.encryption = agTRUE; printf("agtiapi_attach: Encryption Enabled\n" ); } #endif // ## for now, skip calls to ostiGetTransportParam(...) // ## for now, skip references to DIF & EDC // Create a /dev entry for this device. The kernel will assign us // a major number automatically. We use the unit number of this // device as the minor number and name the character device // "agtiapi". pmsc->my_cdev = make_dev( &agtiapi_cdevsw, thisCard, UID_ROOT, GID_WHEEL, 0600, "spcv%u", thisCard ); pmsc->my_cdev->si_drv1 = pmsc; mtx_init( &thisCardInst->pmIOLock, "pmc SAS I/O lock", NULL, MTX_DEF|MTX_RECURSE ); struct cam_devq *devq; /* set the maximum number of pending IOs */ devq = cam_simq_alloc( AGTIAPI_MAX_CAM_Q_DEPTH ); if (devq == NULL) { AGTIAPI_PRINTK("agtiapi_attach: cam_simq_alloc is NULL\n" ); return( EIO ); } struct cam_sim *lsim; lsim = cam_sim_alloc( agtiapi_cam_action, agtiapi_cam_poll, "pmspcbsd", pmsc, thisCard, &thisCardInst->pmIOLock, 1, // queued per target AGTIAPI_MAX_CAM_Q_DEPTH, // max tag depth devq ); if ( lsim == NULL ) { cam_simq_free( devq ); AGTIAPI_PRINTK("agtiapi_attach: cam_sim_alloc is NULL\n" ); return( EIO ); } pmsc->dev_scan = agFALSE; //one cam sim per scsi bus mtx_lock( &thisCardInst->pmIOLock ); if ( xpt_bus_register( lsim, devx, 0 ) != CAM_SUCCESS ) { // bus 0 cam_sim_free( lsim, TRUE ); mtx_unlock( &thisCardInst->pmIOLock ); 
AGTIAPI_PRINTK("agtiapi_attach: xpt_bus_register fails\n" ); return( EIO ); } pmsc->sim = lsim; bus = cam_sim_path(pmsc->sim); tid = CAM_TARGET_WILDCARD; lun = CAM_LUN_WILDCARD; ccb = xpt_alloc_ccb_nowait(); if (ccb == agNULL) { mtx_unlock( &thisCardInst->pmIOLock ); cam_sim_free( lsim, TRUE ); cam_simq_free( devq ); return ( EIO ); } if (xpt_create_path(&ccb->ccb_h.path, agNULL, bus, tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock( &thisCardInst->pmIOLock ); cam_sim_free( lsim, TRUE ); cam_simq_free( devq ); xpt_free_ccb(ccb); return( EIO ); } pmsc->path = ccb->ccb_h.path; xpt_setup_ccb(&csa.ccb_h, pmsc->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = agtiapi_async; csa.callback_arg = pmsc; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { AGTIAPI_PRINTK("agtiapi_attach: Unable to register AC_FOUND_DEVICE\n" ); } lsim->devq = devq; mtx_unlock( &thisCardInst->pmIOLock ); // get TD and lower layer memory requirements tiCOMGetResource( &pmsc->tiRoot, &pRscInfo->tiLoLevelResource, &pRscInfo->tiInitiatorResource, NULL, &pRscInfo->tiSharedMem ); agtiapi_ScopeDMARes( thisCardInst ); AGTIAPI_PRINTK( "agtiapi_attach: size from the call agtiapi_ScopeDMARes" " 0x%x \n", pmsc->typhn ); // initialize card information and get resource ready if( agtiapi_InitResource( thisCardInst ) == AGTIAPI_FAIL ) { AGTIAPI_PRINTK( "agtiapi_attach: Card %d initialize resource ERROR\n", thisCard ); } // begin: allocate and initialize card portal info resource ag_portal_data_t *pPortalData; if (pmsc->portCount == 0) { pmsc->pPortalData = NULL; } else { pmsc->pPortalData = (ag_portal_data_t *) malloc( sizeof(ag_portal_data_t) * pmsc->portCount, M_PMC_MPRT, M_ZERO | M_WAITOK ); if (pmsc->pPortalData == NULL) { AGTIAPI_PRINTK( "agtiapi_attach: Portal memory allocation ERROR\n" ); } } pPortalData = pmsc->pPortalData; for( idx = 0; idx < pmsc->portCount; idx++ ) { pPortalData->pCard = pmsc; pPortalData->portalInfo.portID = idx; 
pPortalData->portalInfo.tiPortalContext.osData = (void *)pPortalData; pPortalData++; } // end: allocate and initialize card portal info resource // begin: enable msix // setup msix // map to interrupt handler int error = 0; int mesgs = MAX_MSIX_NUM_VECTOR; int i, cnt; void (*intrHandler[MAX_MSIX_NUM_ISR])(void *arg) = { agtiapi_IntrHandler0, agtiapi_IntrHandler1, agtiapi_IntrHandler2, agtiapi_IntrHandler3, agtiapi_IntrHandler4, agtiapi_IntrHandler5, agtiapi_IntrHandler6, agtiapi_IntrHandler7, agtiapi_IntrHandler8, agtiapi_IntrHandler9, agtiapi_IntrHandler10, agtiapi_IntrHandler11, agtiapi_IntrHandler12, agtiapi_IntrHandler13, agtiapi_IntrHandler14, agtiapi_IntrHandler15 }; cnt = pci_msix_count(devx); AGTIAPI_PRINTK("supported MSIX %d\n", cnt); //this should be 64 mesgs = MIN(mesgs, cnt); error = pci_alloc_msix(devx, &mesgs); if (error != 0) { printf( "pci_alloc_msix error %d\n", error ); AGTIAPI_PRINTK("error %d\n", error); return( EIO ); } for(i=0; i < mesgs; i++) { pmsc->rscID[i] = i + 1; pmsc->irq[i] = bus_alloc_resource_any( devx, SYS_RES_IRQ, &pmsc->rscID[i], RF_ACTIVE ); if( pmsc->irq[i] == NULL ) { printf( "RES_IRQ went terribly bad at %d\n", i ); return( EIO ); } if ( (error = bus_setup_intr( devx, pmsc->irq[i], INTR_TYPE_CAM | INTR_MPSAFE, NULL, intrHandler[i], pmsc, &pmsc->intrcookie[i] ) ) != 0 ) { device_printf( devx, "Failed to register handler" ); return( EIO ); } } pmsc->flags |= AGTIAPI_IRQ_REQUESTED; pmsc->pCardInfo->maxInterruptVectors = MAX_MSIX_NUM_VECTOR; // end: enable msix int ret = 0; ret = agtiapi_InitCardSW(pmsc); if (ret == AGTIAPI_FAIL || ret == AGTIAPI_UNKNOWN) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_InitCardSW failure %d\n", ret ); return( EIO ); } pmsc->ccbFreeList = NULL; pmsc->ccbChainList = NULL; pmsc->ccbAllocList = NULL; pmsc->flags |= ( AGTIAPI_INSTALLED ); ret = agtiapi_alloc_requests( pmsc ); if( ret != 0 ) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_alloc_requests failure %d\n", ret ); return( EIO ); } ret = 
agtiapi_alloc_ostimem( pmsc ); if (ret != AGTIAPI_SUCCESS) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_alloc_ostimem failure %d\n", ret ); return( EIO ); } ret = agtiapi_InitCardHW( pmsc ); if (ret != 0) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_InitCardHW failure %d\n", ret ); return( EIO ); } #ifdef HIALEAH_ENCRYPTION if(pmsc->encrypt) { if((agtiapi_SetupEncryption(pmsc)) < 0) AGTIAPI_PRINTK("SetupEncryption returned less than 0\n"); } #endif pmsc->flags &= ~AGTIAPI_INIT_TIME; return( 0 ); } /****************************************************************************** agtiapi_InitCardSW() Purpose: Host Bus Adapter Initialization Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: TBD, need chip register information ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitCardSW( struct agtiapi_softc *pmsc ) { ag_card_info_t *thisCardInst = pmsc->pCardInfo; ag_resource_info_t *pRscInfo = &thisCardInst->tiRscInfo; int initSWIdx; // begin: agtiapi_InitCardSW() // now init some essential locks n agtiapi_InitCardSW mtx_init( &pmsc->sendLock, "local q send lock", NULL, MTX_DEF ); mtx_init( &pmsc->doneLock, "local q done lock", NULL, MTX_DEF ); mtx_init( &pmsc->sendSMPLock, "local q send lock", NULL, MTX_DEF ); mtx_init( &pmsc->doneSMPLock, "local q done lock", NULL, MTX_DEF ); mtx_init( &pmsc->ccbLock, "ccb list lock", NULL, MTX_DEF ); mtx_init( &pmsc->devListLock, "hotP devListLock", NULL, MTX_DEF ); mtx_init( &pmsc->memLock, "dynamic memory lock", NULL, MTX_DEF ); mtx_init( &pmsc->freezeLock, "sim freeze lock", NULL, MTX_DEF | MTX_RECURSE); // initialize lower layer resources //## if (pCard->flags & AGTIAPI_INIT_TIME) { #ifdef HIALEAH_ENCRYPTION /* Enable encryption if chip supports it */ if (pci_get_device(pmsc->pCardInfo->pPCIDev) == PCI_DEVICE_ID_HIALEAH_HBA_SPCVE) pmsc->encrypt = 1; if (pmsc->encrypt) 
pRscInfo->tiLoLevelResource.loLevelOption.encryption = agTRUE; #endif pmsc->flags &= ~(AGTIAPI_PORT_INITIALIZED | AGTIAPI_SYS_INTR_ON); // For now, up to 16 MSIX vectors are supported thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption. maxInterruptVectors = pmsc->pCardInfo->maxInterruptVectors; AGTIAPI_PRINTK( "agtiapi_InitCardSW: maxInterruptVectors set to %d", pmsc->pCardInfo->maxInterruptVectors ); thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption.max_MSI_InterruptVectors = 0; thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption.flag = 0; pRscInfo->tiLoLevelResource.loLevelOption.maxNumOSLocks = 0; AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMInit root %p, dev %p, pmsc %p\n", &pmsc->tiRoot, pmsc->my_dev, pmsc ); if( tiCOMInit( &pmsc->tiRoot, &thisCardInst->tiRscInfo.tiLoLevelResource, &thisCardInst->tiRscInfo.tiInitiatorResource, NULL, &thisCardInst->tiRscInfo.tiSharedMem ) != tiSuccess ) { AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMInit ERROR\n" ); return AGTIAPI_FAIL; } int maxLocks; maxLocks = pRscInfo->tiLoLevelResource.loLevelOption.numOfQueuesPerPort; pmsc->STLock = malloc( ( maxLocks * sizeof(struct mtx) ), M_PMC_MSTL, M_ZERO | M_WAITOK ); for( initSWIdx = 0; initSWIdx < maxLocks; initSWIdx++ ) { // init all indexes mtx_init( &pmsc->STLock[initSWIdx], "LL & TD lock", NULL, MTX_DEF ); } if( tiCOMPortInit( &pmsc->tiRoot, agFALSE ) != tiSuccess ) { printf( "agtiapi_InitCardSW: tiCOMPortInit ERROR -- AGTIAPI_FAIL\n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMPortInit" " root %p, dev %p, pmsc %p\n", &pmsc->tiRoot, pmsc->my_dev, pmsc ); pmsc->flags |= AGTIAPI_PORT_INITIALIZED; pmsc->freezeSim = agFALSE; #ifdef HIALEAH_ENCRYPTION atomic_set(&outstanding_encrypted_io_count, 0); /*fix below*/ /*if(pmsc->encrypt && (pmsc->flags & AGTIAPI_INIT_TIME)) if((agtiapi_SetupEncryptionPools(pmsc)) != 0) printf("SetupEncryptionPools failed\n"); */ #endif return AGTIAPI_SUCCESS; // end: agtiapi_InitCardSW() } 
/****************************************************************************** agtiapi_InitCardHW() Purpose: Host Bus Adapter Initialization Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: TBD, need chip register information ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitCardHW( struct agtiapi_softc *pmsc ) { U32 numVal; U32 count; U32 loop; // begin: agtiapi_InitCardHW() ag_portal_info_t *pPortalInfo = NULL; ag_portal_data_t *pPortalData; // ISR is registered, enable chip interrupt. tiCOMSystemInterruptsActive( &pmsc->tiRoot, agTRUE ); pmsc->flags |= AGTIAPI_SYS_INTR_ON; numVal = sizeof(ag_device_t) * pmsc->devDiscover; pmsc->pDevList = (ag_device_t *)malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK ); if( !pmsc->pDevList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d DevList ERROR\n", numVal ); panic( "agtiapi_InitCardHW\n" ); return AGTIAPI_FAIL; } #ifdef LINUX_PERBI_SUPPORT numVal = sizeof(ag_slr_map_t) * pmsc->devDiscover; pmsc->pSLRList = (ag_slr_map_t *)malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK ); if( !pmsc->pSLRList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d SLRList ERROR\n", numVal ); panic( "agtiapi_InitCardHW SLRL\n" ); return AGTIAPI_FAIL; } numVal = sizeof(ag_tgt_map_t) * pmsc->devDiscover; pmsc->pWWNList = (ag_tgt_map_t *)malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK ); if( !pmsc->pWWNList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d WWNList ERROR\n", numVal ); panic( "agtiapi_InitCardHW WWNL\n" ); return AGTIAPI_FAIL; } // Get the WWN_to_target_ID mappings from the // holding area which contains the input of the // system configuration file. 
if( ag_Perbi ) agtiapi_GetWWNMappings( pmsc, agMappingList ); else { agtiapi_GetWWNMappings( pmsc, 0 ); if( agMappingList ) printf( "agtiapi_InitCardHW: WWN PERBI disabled WARN\n" ); } #endif //agtiapi_DelaySec(5); DELAY( 500000 ); pmsc->tgtCount = 0; pmsc->flags &= ~AGTIAPI_CB_DONE; pPortalData = pmsc->pPortalData; //start port for (count = 0; count < pmsc->portCount; count++) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); pPortalInfo = &pPortalData->portalInfo; pPortalInfo->portStatus &= ~( AGTIAPI_PORT_START | AGTIAPI_PORT_DISC_READY | AGTIAPI_DISC_DONE | AGTIAPI_DISC_COMPLETE ); for (loop = 0; loop < AGTIAPI_LOOP_MAX; loop++) { AGTIAPI_PRINTK( "tiCOMPortStart entry data %p / %d / %p\n", &pmsc->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext ); if( tiCOMPortStart( &pmsc->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext, 0 ) != tiSuccess ) { AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); agtiapi_DelayMSec( AGTIAPI_EXTRA_DELAY ); AG_SPIN_LOCK_IRQ(agtiapi_host_lock, flags); AGTIAPI_PRINTK( "tiCOMPortStart failed -- no loop, portalData %p\n", pPortalData ); } else { AGTIAPI_PRINTK( "tiCOMPortStart success no loop, portalData %p\n", pPortalData ); break; } } // end of for loop /* release lock */ AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); if( loop >= AGTIAPI_LOOP_MAX ) { return AGTIAPI_FAIL; } tiCOMGetPortInfo( &pmsc->tiRoot, &pPortalInfo->tiPortalContext, &pPortalInfo->tiPortInfo ); pPortalData++; } /* discover target device */ #ifndef HOTPLUG_SUPPORT agtiapi_DiscoverTgt( pCard ); #endif pmsc->flags |= AGTIAPI_INSTALLED; if( pmsc->flags & AGTIAPI_INIT_TIME ) { agtiapi_TITimer( (void *)pmsc ); pmsc->flags |= AGTIAPI_TIMER_ON; } return 0; } /****************************************************************************** agtiapi_IntrHandlerx_() Purpose: Interrupt service routine. 
Parameters: void arg (IN) Pointer to the HBA data structure bit32 idx (IN) Vector index ******************************************************************************/ void agtiapi_IntrHandlerx_( void *arg, int index ) { struct agtiapi_softc *pCard; int rv; pCard = (struct agtiapi_softc *)arg; #ifndef AGTIAPI_DPC ccb_t *pccb; #endif AG_LOCAL_LOCK(&(pCard->pCardInfo->pmIOLock)); AG_PERF_SPINLOCK(agtiapi_host_lock); if (pCard->flags & AGTIAPI_SHUT_DOWN) goto ext; rv = tiCOMInterruptHandler(&pCard->tiRoot, index); if (rv == agFALSE) { /* not our irq */ AG_SPIN_UNLOCK(agtiapi_host_lock); AG_LOCAL_UNLOCK(&(pCard->pCardInfo->pmIOLock)); return; } #ifdef AGTIAPI_DPC tasklet_hi_schedule(&pCard->tasklet_dpc[idx]); #else /* consume all completed entries, 100 is random number to be big enough */ tiCOMDelayedInterruptHandler(&pCard->tiRoot, index, 100, tiInterruptContext); AG_GET_DONE_PCCB(pccb, pCard); AG_GET_DONE_SMP_PCCB(pccb, pCard); #endif ext: AG_SPIN_UNLOCK(agtiapi_host_lock); AG_LOCAL_UNLOCK(&(pCard->pCardInfo->pmIOLock)); return; } /****************************************************************************** agtiapi_IntrHandler0() Purpose: Interrupt service routine for interrupt vector index 0. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler0( void *arg ) { agtiapi_IntrHandlerx_( arg, 0 ); return; } /****************************************************************************** agtiapi_IntrHandler1() Purpose: Interrupt service routine for interrupt vector index 1. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler1( void *arg ) { agtiapi_IntrHandlerx_( arg, 1 ); return; } /****************************************************************************** agtiapi_IntrHandler2() Purpose: Interrupt service routine for interrupt vector index 2. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler2( void *arg ) { agtiapi_IntrHandlerx_( arg, 2 ); return; } /****************************************************************************** agtiapi_IntrHandler3() Purpose: Interrupt service routine for interrupt vector index 3. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler3( void *arg ) { agtiapi_IntrHandlerx_( arg, 3 ); return; } /****************************************************************************** agtiapi_IntrHandler4() Purpose: Interrupt service routine for interrupt vector index 4. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler4( void *arg ) { agtiapi_IntrHandlerx_( arg, 4 ); return; } /****************************************************************************** agtiapi_IntrHandler5() Purpose: Interrupt service routine for interrupt vector index 5. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler5( void *arg ) { agtiapi_IntrHandlerx_( arg, 5 ); return; } /****************************************************************************** agtiapi_IntrHandler6() Purpose: Interrupt service routine for interrupt vector index 6. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler6( void *arg ) { agtiapi_IntrHandlerx_( arg, 6 ); return; } /****************************************************************************** agtiapi_IntrHandler7() Purpose: Interrupt service routine for interrupt vector index 7. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler7( void *arg ) { agtiapi_IntrHandlerx_( arg, 7 ); return; } /****************************************************************************** agtiapi_IntrHandler8() Purpose: Interrupt service routine for interrupt vector index 8. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler8( void *arg ) { agtiapi_IntrHandlerx_( arg, 8 ); return; } /****************************************************************************** agtiapi_IntrHandler9() Purpose: Interrupt service routine for interrupt vector index 9. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler9( void *arg ) { agtiapi_IntrHandlerx_( arg, 9 ); return; } /****************************************************************************** agtiapi_IntrHandler10() Purpose: Interrupt service routine for interrupt vector index 10. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler10( void *arg ) { agtiapi_IntrHandlerx_( arg, 10 ); return; } /****************************************************************************** agtiapi_IntrHandler11() Purpose: Interrupt service routine for interrupt vector index 11. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler11( void *arg ) { agtiapi_IntrHandlerx_( arg, 11 ); return; } /****************************************************************************** agtiapi_IntrHandler12() Purpose: Interrupt service routine for interrupt vector index 12. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler12( void *arg ) { agtiapi_IntrHandlerx_( arg, 12 ); return; } /****************************************************************************** agtiapi_IntrHandler13() Purpose: Interrupt service routine for interrupt vector index 13. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler13( void *arg ) { agtiapi_IntrHandlerx_( arg, 13 ); return; } /****************************************************************************** agtiapi_IntrHandler14() Purpose: Interrupt service routine for interrupt vector index 14. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler14( void *arg ) { agtiapi_IntrHandlerx_( arg, 14 ); return; } /****************************************************************************** agtiapi_IntrHandler15() Purpose: Interrupt service routine for interrupt vector index 15. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler15( void *arg ) { agtiapi_IntrHandlerx_( arg, 15 ); return; } static void agtiapi_SglMemoryCB( void *arg, bus_dma_segment_t *dm_segs, int nseg, int error ) { bus_addr_t *addr; AGTIAPI_PRINTK("agtiapi_SglMemoryCB: start\n"); if (error != 0) { AGTIAPI_PRINTK("agtiapi_SglMemoryCB: error %d\n", error); panic("agtiapi_SglMemoryCB: error %d\n", error); return; } addr = arg; *addr = dm_segs[0].ds_addr; return; } static void agtiapi_MemoryCB( void *arg, bus_dma_segment_t *dm_segs, int nseg, int error ) { bus_addr_t *addr; AGTIAPI_PRINTK("agtiapi_MemoryCB: start\n"); if (error != 0) { AGTIAPI_PRINTK("agtiapi_MemoryCB: error %d\n", error); panic("agtiapi_MemoryCB: error %d\n", error); return; } addr = arg; *addr = dm_segs[0].ds_addr; return; } /****************************************************************************** agtiapi_alloc_requests() Purpose: Allocates resources such as dma tag and timer Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: ******************************************************************************/ int agtiapi_alloc_requests( struct agtiapi_softc *pmcsc ) { int rsize, nsegs; U32 next_tick; nsegs = AGTIAPI_NSEGS; rsize = AGTIAPI_MAX_DMA_SEGS; // 128 AGTIAPI_PRINTK( "agtiapi_alloc_requests: MAXPHYS 0x%x PAGE_SIZE 0x%x \n", MAXPHYS, PAGE_SIZE ); AGTIAPI_PRINTK( "agtiapi_alloc_requests: nsegs %d rsize %d \n", nsegs, rsize ); // 32, 128 // This is for csio->data_ptr if( bus_dma_tag_create( agNULL, // parent 1, // alignment 0, // boundary BUS_SPACE_MAXADDR, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg BUS_SPACE_MAXSIZE_32BIT, // maxsize nsegs, // nsegments BUS_SPACE_MAXSIZE_32BIT, // maxsegsize BUS_DMA_ALLOCNOW, // flags busdma_lock_mutex, // lockfunc 
&pmcsc->pCardInfo->pmIOLock, // lockarg &pmcsc->buffer_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot alloc request DMA tag\n" ); return( ENOMEM ); } // This is for tiSgl_t of pccb in agtiapi_PrepCCBs() rsize = (sizeof(tiSgl_t) * AGTIAPI_NSEGS) * AGTIAPI_CCB_PER_DEVICE * maxTargets; AGTIAPI_PRINTK( "agtiapi_alloc_requests: rsize %d \n", rsize ); // 32, 128 if( bus_dma_tag_create( agNULL, // parent 32, // alignment 0, // boundary BUS_SPACE_MAXADDR_32BIT, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg rsize, // maxsize 1, // nsegments rsize, // maxsegsize BUS_DMA_ALLOCNOW, // flags NULL, // lockfunc NULL, // lockarg &pmcsc->tisgl_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot alloc request DMA tag\n" ); return( ENOMEM ); } if( bus_dmamem_alloc( pmcsc->tisgl_dmat, (void **)&pmcsc->tisgl_mem, BUS_DMA_NOWAIT, &pmcsc->tisgl_map ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot allocate SGL memory\n" ); return( ENOMEM ); } bzero( pmcsc->tisgl_mem, rsize ); bus_dmamap_load( pmcsc->tisgl_dmat, pmcsc->tisgl_map, pmcsc->tisgl_mem, rsize, agtiapi_SglMemoryCB, &pmcsc->tisgl_busaddr, BUS_DMA_NOWAIT /* 0 */ ); mtx_init( &pmcsc->OS_timer_lock, "OS timer lock", NULL, MTX_DEF ); mtx_init( &pmcsc->IO_timer_lock, "IO timer lock", NULL, MTX_DEF ); mtx_init( &pmcsc->devRmTimerLock, "targ rm timer lock", NULL, MTX_DEF ); callout_init_mtx( &pmcsc->OS_timer, &pmcsc->OS_timer_lock, 0 ); callout_init_mtx( &pmcsc->IO_timer, &pmcsc->IO_timer_lock, 0 ); callout_init_mtx( &pmcsc->devRmTimer, &pmcsc->devRmTimerLock, 0); next_tick = pmcsc->pCardInfo->tiRscInfo.tiLoLevelResource. 
loLevelOption.usecsPerTick / USEC_PER_TICK; AGTIAPI_PRINTK( "agtiapi_alloc_requests: before callout_reset, " "next_tick 0x%x\n", next_tick ); callout_reset( &pmcsc->OS_timer, next_tick, agtiapi_TITimer, pmcsc ); return 0; } /****************************************************************************** agtiapi_alloc_ostimem() Purpose: Allocates memory used later in ostiAllocMemory Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: This is a pre-allocation for ostiAllocMemory() "non-cacheable" function calls ******************************************************************************/ int agtiapi_alloc_ostimem( struct agtiapi_softc *pmcsc ) { int rsize, nomsize; nomsize = 4096; rsize = AGTIAPI_DYNAMIC_MAX * nomsize; // 8M AGTIAPI_PRINTK("agtiapi_alloc_ostimem: rsize %d \n", rsize); if( bus_dma_tag_create( agNULL, // parent 32, // alignment 0, // boundary BUS_SPACE_MAXADDR, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg rsize, // maxsize (size) 1, // number of segments rsize, // maxsegsize 0, // flags NULL, // lockfunc NULL, // lockarg &pmcsc->osti_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_ostimem: Can't create no-cache mem tag\n" ); return AGTIAPI_FAIL; } if( bus_dmamem_alloc( pmcsc->osti_dmat, &pmcsc->osti_mem, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &pmcsc->osti_mapp ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_ostimem: Cannot allocate cache mem %d\n", rsize ); return AGTIAPI_FAIL; } bus_dmamap_load( pmcsc->osti_dmat, pmcsc->osti_mapp, pmcsc->osti_mem, rsize, agtiapi_MemoryCB, // try reuse of CB for same goal &pmcsc->osti_busaddr, BUS_DMA_NOWAIT ); // populate all the ag_dma_addr_t osti_busaddr/mem fields with addresses for // handy reference when driver is in motion int idx; ag_card_info_t *pCardInfo = pmcsc->pCardInfo; ag_dma_addr_t *pMem; for( idx = 0; idx < AGTIAPI_DYNAMIC_MAX; idx++ ) { pMem = &pCardInfo->dynamicMem[idx]; 
    /* Tail of the pre-population loop opened in the previous chunk: record
     * the bus and kernel-virtual address of each fixed-size slot and seed
     * the free stack. */
    pMem->nocache_busaddr = pmcsc->osti_busaddr + ( idx * nomsize );
    pMem->nocache_mem     = (void*)((U64)pmcsc->osti_mem + ( idx * nomsize ));
    pCardInfo->freeDynamicMem[idx] = &pCardInfo->dynamicMem[idx];
  }
  pCardInfo->topOfFreeDynamicMem = AGTIAPI_DYNAMIC_MAX;

  return AGTIAPI_SUCCESS;
}

/******************************************************************************
agtiapi_cam_action()
Purpose:
  Parses CAM frames and triggers a corresponding action
Parameters:
  struct cam_sim *sim (IN)  Pointer to SIM data structure
  union ccb * ccb (IN)      Pointer to CAM ccb data structure
Return:
Note:
  Caller must hold pCardInfo->pmIOLock (asserted below).  Most cases fall
  through to xpt_done(); XPT_SMP_IO and XPT_SCSI_IO complete asynchronously
  and return early.
******************************************************************************/
static void agtiapi_cam_action( struct cam_sim *sim, union ccb * ccb )
{
  struct agtiapi_softc *pmcsc;
  tiDeviceHandle_t *pDevHandle = NULL; // acts as flag as well
  tiDeviceInfo_t devInfo;
  int pathID, targetID, lunID;
  int lRetVal;
  U32 TID;
  U32 speed = 150000;

  pmcsc = cam_sim_softc( sim );
  AGTIAPI_IO( "agtiapi_cam_action: start pmcs %p\n", pmcsc );
  if (pmcsc == agNULL) {
    AGTIAPI_PRINTK( "agtiapi_cam_action: start pmcs is NULL\n" );
    return;
  }
  mtx_assert( &(pmcsc->pCardInfo->pmIOLock), MA_OWNED );

  AGTIAPI_IO( "agtiapi_cam_action: cardNO %d func_code 0x%x\n",
              pmcsc->cardNo, ccb->ccb_h.func_code );

  pathID   = xpt_path_path_id( ccb->ccb_h.path );
  targetID = xpt_path_target_id( ccb->ccb_h.path );
  lunID    = xpt_path_lun_id( ccb->ccb_h.path );

  AGTIAPI_IO( "agtiapi_cam_action: P 0x%x T 0x%x L 0x%x\n",
              pathID, targetID, lunID );

  switch (ccb->ccb_h.func_code) {
  case XPT_PATH_INQ:
  {
    struct ccb_pathinq *cpi;

    /* See architecure book p180*/
    cpi = &ccb->cpi;
    cpi->version_num = 1;
    cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16;
    cpi->target_sprt = 0;
    cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN;
    cpi->hba_eng_cnt = 0;
    cpi->max_target = maxTargets - 1;
    cpi->max_lun = AGTIAPI_MAX_LUN;
    /* Max supported I/O size, in bytes. */
    cpi->maxio = 1024 *1024;
    cpi->initiator_id = 255;
    strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
    strlcpy(cpi->hba_vid, "PMC", HBA_IDLEN);
    strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
    cpi->unit_number = cam_sim_unit(sim);
    cpi->bus_id = cam_sim_bus(sim);
    // rate is set when XPT_GET_TRAN_SETTINGS is processed
    cpi->base_transfer_speed = 150000;
    cpi->transport = XPORT_SAS;
    cpi->transport_version = 0;
    cpi->protocol = PROTO_SCSI;
    cpi->protocol_version = SCSI_REV_SPC3;
    cpi->ccb_h.status = CAM_REQ_CMP;
    break;
  }
  case XPT_GET_TRAN_SETTINGS:
  {
    struct ccb_trans_settings *cts;
    struct ccb_trans_settings_sas *sas;
    struct ccb_trans_settings_scsi *scsi;

    /* NOTE(review): this early return skips xpt_done() for the ccb —
       presumably intentional during shutdown; confirm against CAM rules. */
    if ( pmcsc->flags & AGTIAPI_SHUT_DOWN ) {
      return;
    }

    cts  = &ccb->cts;
    sas  = &ccb->cts.xport_specific.sas;
    scsi = &cts->proto_specific.scsi;

    cts->protocol = PROTO_SCSI;
    cts->protocol_version = SCSI_REV_SPC3;
    cts->transport = XPORT_SAS;
    cts->transport_version = 0;

    sas->valid = CTS_SAS_VALID_SPEED;

    /* this sets the "MB/s transfers" */
    if (pmcsc != NULL && targetID >= 0 && targetID < maxTargets) {
      if (pmcsc->pWWNList != NULL) {
        TID = INDEX(pmcsc, targetID);
        if (TID < maxTargets) {
          pDevHandle = pmcsc->pDevList[TID].pDevHandle;
        }
      }
    }
    /* Map the device's negotiated link rate nibble to a CAM bitrate. */
    if (pDevHandle) {
      tiINIGetDeviceInfo( &pmcsc->tiRoot, pDevHandle, &devInfo );
      switch (devInfo.info.devType_S_Rate & 0xF) {
      case 0x8: speed = 150000;  break;   /* 1.5 Gbps */
      case 0x9: speed = 300000;  break;   /* 3 Gbps   */
      case 0xA: speed = 600000;  break;   /* 6 Gbps   */
      case 0xB: speed = 1200000; break;   /* 12 Gbps  */
      default:  speed = 150000;  break;
      }
    }
    sas->bitrate = speed;
    scsi->valid  = CTS_SCSI_VALID_TQ;
    scsi->flags  = CTS_SCSI_FLAGS_TAG_ENB;
    ccb->ccb_h.status = CAM_REQ_CMP;
    break;
  }
  case XPT_RESET_BUS:
  {
    lRetVal = agtiapi_eh_HostReset( pmcsc, ccb ); // usually works first time
    if ( SUCCESS == lRetVal ) {
      AGTIAPI_PRINTK( "agtiapi_cam_action: bus reset success.\n" );
    }
    else {
      AGTIAPI_PRINTK( "agtiapi_cam_action: bus reset failed.\n" );
    }
    ccb->ccb_h.status = CAM_REQ_CMP;
    break;
  }
  case XPT_RESET_DEV:
  {
    ccb->ccb_h.status = CAM_REQ_CMP;
    break;
  }
  case XPT_ABORT:
  {
    ccb->ccb_h.status = CAM_REQ_CMP;
    break;
  }
#if __FreeBSD_version >= 900026
  case XPT_SMP_IO:
  {
    /* SMP request completes asynchronously — do not xpt_done() here. */
    agtiapi_QueueSMP( pmcsc, ccb );
    return;
  }
#endif /* __FreeBSD_version >= 900026 */
  case XPT_SCSI_IO:
  {
    if(pmcsc->dev_scan == agFALSE) {
      ccb->ccb_h.status = CAM_SEL_TIMEOUT;
      break;
    }
    if (pmcsc->flags & AGTIAPI_SHUT_DOWN) {
      AGTIAPI_PRINTK( "agtiapi_cam_action: shutdown, XPT_SCSI_IO 0x%x\n",
                      XPT_SCSI_IO );
      ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
      break;
    }
    else {
      AGTIAPI_IO( "agtiapi_cam_action: Zero XPT_SCSI_IO 0x%x, doing IOs\n",
                  XPT_SCSI_IO );
      /* I/O completes asynchronously — do not xpt_done() here. */
      agtiapi_QueueCmnd_( pmcsc, ccb );
      return;
    }
  }
  case XPT_CALC_GEOMETRY:
  {
    cam_calc_geometry(&ccb->ccg, 1);
    ccb->ccb_h.status = CAM_REQ_CMP;
    break;
  }
  default:
  {
    /* XPT_SET_TRAN_SETTINGS */
    AGTIAPI_IO( "agtiapi_cam_action: default function code 0x%x\n",
                ccb->ccb_h.func_code );
    ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
    break;
  }
  } /* switch */
  xpt_done(ccb);
}

/******************************************************************************
agtiapi_GetCCB()
Purpose:
  Get a ccb from free list or allocate a new one
Parameters:
  struct agtiapi_softc *pmcsc (IN)  Pointer to HBA structure
Return:
  Pointer to a ccb structure, or NULL if not available
Note:
  Despite the "or allocate a new one" wording, this only pops from the
  free list; exhaustion returns NULL.
******************************************************************************/
STATIC pccb_t agtiapi_GetCCB( struct agtiapi_softc *pmcsc )
{
  pccb_t pccb;

  AGTIAPI_IO( "agtiapi_GetCCB: start\n" );

  AG_LOCAL_LOCK( &pmcsc->ccbLock );

  /* get the ccb from the head of the free list */
  if ((pccb = (pccb_t)pmcsc->ccbFreeList) != NULL) {
    pmcsc->ccbFreeList = (caddr_t *)pccb->pccbNext;
    pccb->pccbNext  = NULL;
    pccb->flags     = ACTIVE;
    pccb->startTime = 0;
    pmcsc->activeCCB++;
    AGTIAPI_IO( "agtiapi_GetCCB: re-allocated ccb %p\n", pccb );
  }
  else {
    AGTIAPI_PRINTK( "agtiapi_GetCCB: kmalloc ERROR - no ccb allocated\n" );
  }

  AG_LOCAL_UNLOCK( &pmcsc->ccbLock );
  return pccb;
}

/******************************************************************************
agtiapi_QueueCmnd_()
Purpose:
  Calls for sending CCB and executing on HBA.
Parameters:
  struct agtiapi_softc *pmsc (IN)  Pointer to the HBA data structure
  union ccb * ccb (IN)             Pointer to CAM ccb data structure
Return:
  0 - Command is pending to execute
  1 - Command returned without further process
Note:
******************************************************************************/
int agtiapi_QueueCmnd_(struct agtiapi_softc *pmcsc, union ccb * ccb)
{
  struct ccb_scsiio *csio = &ccb->csio;
  pccb_t pccb = agNULL; // call dequeue
  int status = tiSuccess;
  U32 Channel = CMND_TO_CHANNEL(ccb);
  U32 TID     = CMND_TO_TARGET(ccb);
  U32 LUN     = CMND_TO_LUN(ccb);

  AGTIAPI_IO( "agtiapi_QueueCmnd_: start\n" );

  /* no support for CBD > 16 */
  if (csio->cdb_len > 16) {
    AGTIAPI_PRINTK( "agtiapi_QueueCmnd_: unsupported CDB length %d\n",
                    csio->cdb_len );
    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    ccb->ccb_h.status &= ~CAM_STATUS_MASK;
    ccb->ccb_h.status |= CAM_REQ_INVALID;//CAM_REQ_CMP;
    xpt_done(ccb);
    return tiError;
  }

  // NOTE(review): TID is U32, so "TID < 0" is always false here; only the
  // upper-bound check is effective.
  if (TID < 0 || TID >= maxTargets) {
    AGTIAPI_PRINTK("agtiapi_QueueCmnd_: INVALID TID ERROR\n");
    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    ccb->ccb_h.status &= ~CAM_STATUS_MASK;
    ccb->ccb_h.status |= CAM_DEV_NOT_THERE;//CAM_REQ_CMP;
    xpt_done(ccb);
    return tiError;
  }

  /* get a ccb */
  if ((pccb = agtiapi_GetCCB(pmcsc)) == NULL) {
    AGTIAPI_PRINTK("agtiapi_QueueCmnd_: GetCCB ERROR\n");
    if (pmcsc != NULL) {
      ag_device_t *targ;
      TID = INDEX(pmcsc, TID);
      targ = &pmcsc->pDevList[TID];
      /* Shrink the CAM queue depth so CAM stops over-driving us. */
      agtiapi_adjust_queue_depth(ccb->ccb_h.path,targ->qdepth);
    }
    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    ccb->ccb_h.status &= ~CAM_STATUS_MASK;
    ccb->ccb_h.status |= CAM_REQUEUE_REQ;
    xpt_done(ccb);
    return tiBusy;
  }
  pccb->pmcsc = pmcsc;

  /* initialize Command Control Block (CCB) */
  pccb->targetId   = TID;
  pccb->lun        = LUN;
  pccb->channel    = Channel;
  pccb->ccb        = ccb; /* for struct scsi_cmnd */
  pccb->senseLen   = csio->sense_len;
  pccb->startTime  = ticks;
  pccb->pSenseData = (caddr_t) &csio->sense_data;
  pccb->tiSuperScsiRequest.flags = 0;

  /* each channel is reserved for different addr modes */
  pccb->addrMode = agtiapi_AddrModes[Channel];

  status = agtiapi_PrepareSGList(pmcsc, pccb);
  if (status != tiSuccess) {
    AGTIAPI_PRINTK("agtiapi_QueueCmnd_: agtiapi_PrepareSGList failure\n");
    agtiapi_FreeCCB(pmcsc, pccb);
    if (status == tiReject) {
      ccb->ccb_h.status = CAM_REQ_INVALID;
    }
    else {
      ccb->ccb_h.status = CAM_REQ_CMP;
    }
    xpt_done( ccb );
    return tiError;
  }
  return status;
}

/******************************************************************************
agtiapi_DumpCDB()
Purpose:
  Prints out CDB
Parameters:
  const char *ptitle (IN)  A string to be printed
  ccb_t *pccb (IN)         A pointer to the driver's own CCB, not CAM's CCB
Return:
Note:
******************************************************************************/
STATIC void agtiapi_DumpCDB(const char *ptitle, ccb_t *pccb)
{
  union ccb *ccb;
  struct ccb_scsiio *csio;
  bit8  cdb[64];
  int len;

  if (pccb == NULL) {
    printf( "agtiapi_DumpCDB: no pccb here \n" );
    panic("agtiapi_DumpCDB: pccb is NULL. called from %s\n", ptitle);
    return;
  }
  ccb = pccb->ccb;
  if (ccb == NULL) {
    printf( "agtiapi_DumpCDB: no ccb here \n" );
    panic( "agtiapi_DumpCDB: pccb %p ccb %p flags %d ccb NULL! "
           "called from %s\n",
           pccb, pccb->ccb, pccb->flags, ptitle );
    return;
  }
  csio = &ccb->csio;
  /* NOTE(review): csio is the address of a member of *ccb, so it can never
     be NULL once ccb != NULL — this branch is effectively dead. */
  if (csio == NULL) {
    printf( "agtiapi_DumpCDB: no csio here \n" );
    panic( "agtiapi_DumpCDB: pccb%p ccb%p flags%d csio NULL! "
           "called from %s\n",
           pccb, pccb->ccb, pccb->flags, ptitle );
    return;
  }
  len = MIN(64, csio->cdb_len);
  if (csio->ccb_h.flags & CAM_CDB_POINTER) {
    bcopy(csio->cdb_io.cdb_ptr, &cdb[0], len);
  }
  else {
    bcopy(csio->cdb_io.cdb_bytes, &cdb[0], len);
  }
  AGTIAPI_IO( "agtiapi_DumpCDB: pccb%p CDB0x%x csio->cdb_len %d"
              " len %d from %s\n",
              pccb, cdb[0], csio->cdb_len, len, ptitle );
  return;
}

/******************************************************************************
agtiapi_DoSoftReset()
Purpose:
  Do card reset
Parameters:
  *data (IN)  point to pmcsc (struct agtiapi_softc *)
Return:
Note:
******************************************************************************/
int agtiapi_DoSoftReset (struct agtiapi_softc *pmcsc)
{
  int ret;
  unsigned long flags;

  pmcsc->flags |= AGTIAPI_SOFT_RESET;
  AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags );
  ret = agtiapi_ResetCard( pmcsc, &flags );
  AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags );

  if( ret != AGTIAPI_SUCCESS )
    return tiError;

  return SUCCESS;
}

/******************************************************************************
agtiapi_CheckIOTimeout()
Purpose:
  Timeout function for SCSI IO or TM
Parameters:
  *data (IN)  point to pCard (ag_card_t *)
Return:
Note:
  Periodic callout (re-armed every second at the restart_timer label in the
  continuation of this function).  Walks the CCB chain looking for I/Os that
  exceeded ag_timeout_secs and kicks off task management for them.
******************************************************************************/
STATIC void  agtiapi_CheckIOTimeout(void *data)
{
  U32 status = AGTIAPI_SUCCESS;
  ccb_t *pccb;
  struct agtiapi_softc *pmcsc;
  pccb_t pccb_curr;
  pccb_t pccb_next;

  pmcsc = (struct agtiapi_softc *)data;

  //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Enter\n");
  //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Active CCB %d\n", pmcsc->activeCCB);

  pccb = (pccb_t)pmcsc->ccbChainList;

  /* if link is down, do nothing */
  if ((pccb == NULL) || (pmcsc->activeCCB == 0)) {
    //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: goto restart_timer\n");
    goto restart_timer;
  }

  /* NOTE(review): "flags" has no visible local declaration here — presumably
     the AG_SPIN_LOCK_IRQ macro ignores or supplies it on FreeBSD; verify.
     Also note the restart_timer path taken above reaches the unlock at the
     ext label without having taken this lock — confirm the macro is a no-op
     in this port. */
  AG_SPIN_LOCK_IRQ(agtiapi_host_lock, flags);
  if (pmcsc->flags & AGTIAPI_SHUT_DOWN)
    goto ext;

  pccb_curr = pccb;

  /* Walk through the IO Chain linked list to find the pending io */
  /* Set the
TM flag based on the pccb type, i.e SCSI IO or TM cmd */ while (pccb_curr != NULL) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbChainNext; if( (pccb_curr->flags == 0) || (pccb_curr->tiIORequest.tdData == NULL) || (pccb_curr->startTime == 0) /* && (pccb->startTime == 0) */) { //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: move to next element\n"); } else if ( ( (ticks-pccb_curr->startTime) >= ag_timeout_secs ) && !(pccb_curr->flags & TIMEDOUT) ) { AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: pccb %p timed out, call TM " "function -- flags=%x startTime=%ld tdData = %p\n", pccb_curr, pccb_curr->flags, pccb->startTime, pccb_curr->tiIORequest.tdData ); pccb_curr->flags |= TIMEDOUT; status = agtiapi_StartTM(pmcsc, pccb_curr); if (status == AGTIAPI_SUCCESS) { AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: TM Request sent with " "success\n" ); goto restart_timer; } else { #ifdef AGTIAPI_LOCAL_RESET /* abort request did not go through */ AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Abort request failed\n"); /* TODO: call Soft reset here */ AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout:in agtiapi_CheckIOTimeout() " "abort request did not go thru ==> soft reset#7, then " "restart timer\n" ); agtiapi_DoSoftReset (pmcsc); goto restart_timer; #endif } } pccb_curr = pccb_next; } restart_timer: callout_reset(&pmcsc->IO_timer, 1*hz, agtiapi_CheckIOTimeout, pmcsc); ext: AG_SPIN_UNLOCK_IRQ(agtiapi_host_lock, flags); return; } /****************************************************************************** agtiapi_StartTM() Purpose: DDI calls for aborting outstanding IO command Parameters: struct scsi_cmnd *pccb (IN) Pointer to the command to be aborted unsigned long flags (IN/out) spinlock flags used in locking from calling layers Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail ******************************************************************************/ int agtiapi_StartTM(struct agtiapi_softc *pCard, ccb_t *pccb) { ccb_t *pTMccb = NULL; U32 status = AGTIAPI_SUCCESS; ag_device_t 
*pDevice = NULL; U32 TMstatus = tiSuccess; AGTIAPI_PRINTK( "agtiapi_StartTM: pccb %p, pccb->flags %x\n", pccb, pccb->flags ); if (pccb == NULL) { AGTIAPI_PRINTK("agtiapi_StartTM: %p not found\n",pccb); status = AGTIAPI_SUCCESS; goto ext; } if (!pccb->tiIORequest.tdData) { /* should not be the case */ AGTIAPI_PRINTK("agtiapi_StartTM: ccb %p flag 0x%x tid %d no tdData " "ERROR\n", pccb, pccb->flags, pccb->targetId); status = AGTIAPI_FAIL; } else { /* If timedout CCB is TM_ABORT_TASK command, issue LocalAbort first to clear pending TM_ABORT_TASK */ /* Else Device State will not be put back to Operational, (refer FW) */ if (pccb->flags & TASK_MANAGEMENT) { if (tiINIIOAbort(&pCard->tiRoot, &pccb->tiIORequest) != tiSuccess) { AGTIAPI_PRINTK( "agtiapi_StartTM: LocalAbort Request for Abort_TASK " "TM failed\n" ); /* TODO: call Soft reset here */ AGTIAPI_PRINTK( "agtiapi_StartTM: in agtiapi_StartTM() abort " "tiINIIOAbort() failed ==> soft reset#8\n" ); agtiapi_DoSoftReset( pCard ); } else { AGTIAPI_PRINTK( "agtiapi_StartTM: LocalAbort for Abort_TASK TM " "Request sent\n" ); status = AGTIAPI_SUCCESS; } } else { /* get a ccb */ if ((pTMccb = agtiapi_GetCCB(pCard)) == NULL) { AGTIAPI_PRINTK("agtiapi_StartTM: TM resource unavailable!\n"); status = AGTIAPI_FAIL; goto ext; } pTMccb->pmcsc = pCard; pTMccb->targetId = pccb->targetId; pTMccb->devHandle = pccb->devHandle; if (pTMccb->targetId >= pCard->devDiscover) { AGTIAPI_PRINTK("agtiapi_StartTM: Incorrect dev Id in TM!\n"); status = AGTIAPI_FAIL; goto ext; } if (pTMccb->targetId < 0 || pTMccb->targetId >= maxTargets) { return AGTIAPI_FAIL; } if (INDEX(pCard, pTMccb->targetId) >= maxTargets) { return AGTIAPI_FAIL; } pDevice = &pCard->pDevList[INDEX(pCard, pTMccb->targetId)]; if ((pDevice == NULL) || !(pDevice->flags & ACTIVE)) { return AGTIAPI_FAIL; } /* save pending io to issue local abort at Task mgmt CB */ pTMccb->pccbIO = pccb; AGTIAPI_PRINTK( "agtiapi_StartTM: pTMccb %p flag %x tid %d via TM " "request !\n", pTMccb, 
pTMccb->flags, pTMccb->targetId ); pTMccb->flags &= ~(TASK_SUCCESS | ACTIVE); pTMccb->flags |= TASK_MANAGEMENT; TMstatus = tiINITaskManagement(&pCard->tiRoot, pccb->devHandle, AG_ABORT_TASK, &pccb->tiSuperScsiRequest.scsiCmnd.lun, &pccb->tiIORequest, &pTMccb->tiIORequest); if (TMstatus == tiSuccess) { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request success ccb " "%p, pTMccb %p\n", pccb, pTMccb ); pTMccb->startTime = ticks; status = AGTIAPI_SUCCESS; } else if (TMstatus == tiIONoDevice) { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request tiIONoDevice ccb " "%p, pTMccb %p\n", pccb, pTMccb ); status = AGTIAPI_SUCCESS; } else { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request failed ccb %p, " "pTMccb %p\n", pccb, pTMccb ); status = AGTIAPI_FAIL; agtiapi_FreeTMCCB(pCard, pTMccb); /* TODO */ /* call TM_TARGET_RESET */ } } } ext: AGTIAPI_PRINTK("agtiapi_StartTM: return %d flgs %x\n", status, (pccb) ? pccb->flags : -1); return status; } /* agtiapi_StartTM */ #if __FreeBSD_version > 901000 /****************************************************************************** agtiapi_PrepareSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSGList(struct agtiapi_softc *pmcsc, ccb_t *pccb) { union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_IO( "agtiapi_PrepareSGList: start\n" ); // agtiapi_DumpCDB("agtiapi_PrepareSGList", pccb); AGTIAPI_IO( "agtiapi_PrepareSGList: dxfer_len %d\n", csio->dxfer_len ); if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { switch((ccbh->flags & CAM_DATA_MASK)) { int error; struct bus_dma_segment seg; case CAM_DATA_VADDR: /* Virtual address that needs to translated 
         into one or more physical address ranges. */
      // int error;
      //  AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock));
      AGTIAPI_IO( "agtiapi_PrepareSGList: virtual address\n" );
      /* Map the buffer; agtiapi_PrepareSGListCB() finishes and submits. */
      error = bus_dmamap_load( pmcsc->buffer_dmat,
                               pccb->CCB_dmamap,
                               csio->data_ptr,
                               csio->dxfer_len,
                               agtiapi_PrepareSGListCB,
                               pccb,
                               BUS_DMA_NOWAIT/* 0 */ );
      //  AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) );

      if (error == EINPROGRESS) {
        /* So as to maintain ordering, freeze the controller queue
           until our mapping is returned. */
        AGTIAPI_PRINTK("agtiapi_PrepareSGList: EINPROGRESS\n");
        xpt_freeze_simq(pmcsc->sim, 1);
        pmcsc->SimQFrozen = agTRUE;
        ccbh->status |= CAM_RELEASE_SIMQ;
      }
      break;
    case CAM_DATA_PADDR:
      /* We have been given a pointer to single physical buffer. */
      /* pccb->tiSuperScsiRequest.sglVirtualAddr = seg.ds_addr; */
      //struct bus_dma_segment seg;
      AGTIAPI_PRINTK("agtiapi_PrepareSGList: physical address\n");
      seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
      seg.ds_len  = csio->dxfer_len;
      // * 0xFF to be defined
      /* 0xAABBCCDD marker tells the CB this is a pre-mapped single segment */
      agtiapi_PrepareSGListCB(pccb, &seg, 1, 0xAABBCCDD);
      break;
    default:
      AGTIAPI_PRINTK("agtiapi_PrepareSGList: unexpected case\n");
      return tiReject;
    }
  }
  else {
    /* No data phase: 0xAAAAAAAA marker tells the CB there is no transfer. */
    agtiapi_PrepareSGListCB(pccb, NULL, 0, 0xAAAAAAAA);
  }
  return tiSuccess;
}
#else
/******************************************************************************
agtiapi_PrepareSGList()
Purpose:
  This function prepares scatter-gather list for the given ccb
Parameters:
  struct agtiapi_softc *pmsc (IN)  Pointer to the HBA data structure
  ccb_t *pccb (IN)  A pointer to the driver's own CCB, not CAM's CCB
Return:
  0 - success
  1 - failure
Note:
  Pre-9.1 variant using CAM_SCATTER_VALID/CAM_DATA_PHYS flag tests instead
  of the CAM_DATA_MASK switch.
******************************************************************************/
static int agtiapi_PrepareSGList(struct agtiapi_softc *pmcsc, ccb_t *pccb)
{
  union ccb *ccb = pccb->ccb;
  struct ccb_scsiio *csio = &ccb->csio;
  struct ccb_hdr *ccbh = &ccb->ccb_h;
  AGTIAPI_IO( "agtiapi_PrepareSGList: start\n" );
//  agtiapi_DumpCDB("agtiapi_PrepareSGList", pccb);
  AGTIAPI_IO( "agtiapi_PrepareSGList: dxfer_len %d\n", csio->dxfer_len );

  if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
    if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
      /* We've been given a pointer to a single buffer. */
      if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
        /* Virtual address that needs to translated into
           one or more physical address ranges. */
        int error;
        //  AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock));
        AGTIAPI_IO( "agtiapi_PrepareSGList: virtual address\n" );
        error = bus_dmamap_load( pmcsc->buffer_dmat,
                                 pccb->CCB_dmamap,
                                 csio->data_ptr,
                                 csio->dxfer_len,
                                 agtiapi_PrepareSGListCB,
                                 pccb,
                                 BUS_DMA_NOWAIT/* 0 */ );
        //  AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) );

        if (error == EINPROGRESS) {
          /* So as to maintain ordering, freeze the controller queue
             until our mapping is returned. */
          AGTIAPI_PRINTK("agtiapi_PrepareSGList: EINPROGRESS\n");
          xpt_freeze_simq(pmcsc->sim, 1);
          pmcsc->SimQFrozen = agTRUE;
          ccbh->status |= CAM_RELEASE_SIMQ;
        }
      }
      else {
        /* We have been given a pointer to single physical buffer. */
        /* pccb->tiSuperScsiRequest.sglVirtualAddr = seg.ds_addr; */
        struct bus_dma_segment seg;
        AGTIAPI_PRINTK("agtiapi_PrepareSGList: physical address\n");
        seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
        seg.ds_len  = csio->dxfer_len;
        // * 0xFF to be defined
        agtiapi_PrepareSGListCB(pccb, &seg, 1, 0xAABBCCDD);
      }
    }
    else {
      AGTIAPI_PRINTK("agtiapi_PrepareSGList: unexpected case\n");
      return tiReject;
    }
  }
  else {
    agtiapi_PrepareSGListCB(pccb, NULL, 0, 0xAAAAAAAA);
  }
  return tiSuccess;
}
#endif

/******************************************************************************
agtiapi_PrepareSGListCB()
Purpose:
  Callback function for bus_dmamap_load()
  This function sends IO to LL layer.
Parameters:
  void *arg (IN)                Pointer to the HBA data structure
  bus_dma_segment_t *segs (IN)  Pointer to dma segment
  int nsegs (IN)                number of dma segment
  int error (IN)                error
Return:
Note:
  The sentinel "error" values 0xAABBCCDD (single pre-mapped physical
  segment) and 0xAAAAAAAA (no data transfer) are set by the direct calls in
  agtiapi_PrepareSGList() and are not real bus_dma errors.
******************************************************************************/
static void agtiapi_PrepareSGListCB( void *arg,
                                     bus_dma_segment_t *segs,
                                     int nsegs,
                                     int error )
{
  pccb_t     pccb = arg;
  union ccb *ccb  = pccb->ccb;
  struct ccb_scsiio *csio = &ccb->csio;
  struct agtiapi_softc *pmcsc;
  tiIniScsiCmnd_t *pScsiCmnd;
  bit32 i;
  bus_dmasync_op_t op;
  U32_64 phys_addr;
  U08 *CDB;
  int io_is_encryptable = 0;
  unsigned long long start_lba = 0;
  ag_device_t *pDev;
  U32 TID = CMND_TO_TARGET(ccb);

  AGTIAPI_IO( "agtiapi_PrepareSGListCB: start, nsegs %d error 0x%x\n",
              nsegs, error );
  pmcsc = pccb->pmcsc;

  /* Real mapping failure (sentinels excluded): tear down and complete. */
  if (error != tiSuccess) {
    if (error == 0xAABBCCDD || error == 0xAAAAAAAA) {
      // do nothing
    }
    else {
      AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: error status 0x%x\n", error);
      bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap);
      bus_dmamap_destroy(pmcsc->buffer_dmat, pccb->CCB_dmamap);
      agtiapi_FreeCCB(pmcsc, pccb);
      ccb->ccb_h.status = CAM_REQ_CMP;
      xpt_done(ccb);
      return;
    }
  }

  if (nsegs > AGTIAPI_MAX_DMA_SEGS) {
    AGTIAPI_PRINTK( "agtiapi_PrepareSGListCB: over the limit. nsegs %d"
                    " AGTIAPI_MAX_DMA_SEGS %d\n",
                    nsegs, AGTIAPI_MAX_DMA_SEGS );
    bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap);
    bus_dmamap_destroy(pmcsc->buffer_dmat, pccb->CCB_dmamap);
    agtiapi_FreeCCB(pmcsc, pccb);
    ccb->ccb_h.status = CAM_REQ_CMP;
    xpt_done(ccb);
    return;
  }

  /* fill in IO information */
  pccb->dataLen = csio->dxfer_len;

  /* start fill in sgl structure */
  if (nsegs == 1 && error == 0xAABBCCDD) {
    /* to be tested */
    /* A single physical buffer */
    AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: nsegs is 1\n");
    CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, segs[0].ds_addr);
    pccb->tiSuperScsiRequest.agSgl1.len  = htole32(pccb->dataLen);
    pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl);
    pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)segs->ds_addr;
    pccb->numSgElements = 1;
  }
  else if (nsegs == 0 && error == 0xAAAAAAAA) {
    /* no data transfer */
    AGTIAPI_IO( "agtiapi_PrepareSGListCB: no data transfer\n" );
    pccb->tiSuperScsiRequest.agSgl1.len = 0;
    pccb->dataLen = 0;
    pccb->numSgElements = 0;
  }
  else {
    /* virtual/logical buffer */
    if (nsegs == 1) {
      /* single segment: embed it directly in agSgl1 */
      pccb->dataLen = segs[0].ds_len;
      CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, segs[0].ds_addr);
      pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl);
      pccb->tiSuperScsiRequest.agSgl1.len  = htole32(segs[0].ds_len);
      pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)csio->data_ptr;
      pccb->numSgElements = nsegs;
    }
    else {
      /* multi-segment: build sgList[] and point agSgl1 at the list */
      pccb->dataLen = 0;
      /* loop */
      for (i = 0; i < nsegs; i++) {
        pccb->sgList[i].len = htole32(segs[i].ds_len);
        CPU_TO_LE32(pccb->sgList[i], segs[i].ds_addr);
        pccb->sgList[i].type = htole32(tiSgl);
        pccb->dataLen += segs[i].ds_len;
      } /* for */
      pccb->numSgElements = nsegs;
      /* set up sgl buffer address */
      CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, pccb->tisgl_busaddr);
      pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSglList);
      pccb->tiSuperScsiRequest.agSgl1.len  = htole32(pccb->dataLen);
      pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)csio->data_ptr;
      pccb->numSgElements = nsegs;
    } /* else */
  }

  /* set data transfer direction */
  if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
    op = BUS_DMASYNC_PREWRITE;
    pccb->tiSuperScsiRequest.dataDirection = tiDirectionOut;
  }
  else {
    op = BUS_DMASYNC_PREREAD;
    pccb->tiSuperScsiRequest.dataDirection = tiDirectionIn;
  }

  pScsiCmnd = &pccb->tiSuperScsiRequest.scsiCmnd;
  pScsiCmnd->expDataLength = pccb->dataLen;

  if (csio->ccb_h.flags & CAM_CDB_POINTER) {
    bcopy(csio->cdb_io.cdb_ptr, &pScsiCmnd->cdb[0], csio->cdb_len);
  }
  else {
    bcopy(csio->cdb_io.cdb_bytes, &pScsiCmnd->cdb[0],csio->cdb_len);
  }
  CDB = &pScsiCmnd->cdb[0];

  switch (CDB[0]) {
  case REQUEST_SENSE:
    /* requires different buffer */
    /* This code should not be excercised because SAS support auto sense
       For the completeness,  vtophys() is still used here. */
    AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: QueueCmnd - REQUEST SENSE new\n");
    pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->senseLen);
    phys_addr = vtophys(&csio->sense_data);
    CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, phys_addr);
    pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl);
    pccb->dataLen = pccb->senseLen;
    pccb->numSgElements = 1;
    break;
  case INQUIRY:
    /* only using lun 0 for device type detection */
    pccb->flags |= AGTIAPI_INQUIRY;
    break;
  case TEST_UNIT_READY:
  case RESERVE:
  case RELEASE:
  case START_STOP:
    pccb->tiSuperScsiRequest.agSgl1.len = 0;
    pccb->dataLen = 0;
    break;
  case READ_6:
  case WRITE_6:
    /* Extract LBA */
    start_lba = ((CDB[1] & 0x1f) << 16) |
                 (CDB[2] << 8)          |
                 (CDB[3]);
#ifdef HIALEAH_ENCRYPTION
    io_is_encryptable = 1;
#endif
    break;
  case READ_10:
  case WRITE_10:
  case READ_12:
  case WRITE_12:
    /* Extract LBA */
    start_lba = (CDB[2] << 24) |
                (CDB[3] << 16) |
                (CDB[4] << 8)  |
                (CDB[5]);
#ifdef HIALEAH_ENCRYPTION
    io_is_encryptable = 1;
#endif
    break;
  case READ_16:
  case WRITE_16:
    /* Extract LBA */
    start_lba = (CDB[2] << 24) |
                (CDB[3] << 16) |
                (CDB[4] << 8)  |
                (CDB[5]);
    start_lba <<= 32;
    start_lba |= ((CDB[6] << 24) |
                  (CDB[7] << 16) |
                  (CDB[8] << 8)  |
                  (CDB[9]));
#ifdef HIALEAH_ENCRYPTION
    io_is_encryptable = 1;
#endif
    break;
  default:
    break;
  }

  /* fill device lun based one address mode */
  agtiapi_SetLunField(pccb);

  if (pccb->targetId < 0 || pccb->targetId >= maxTargets) {
    pccb->ccbStatus  = tiIOFailed;
    pccb->scsiStatus = tiDetailNoLogin;
    agtiapi_FreeCCB(pmcsc, pccb);
    ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL
    xpt_done(ccb);
    pccb->ccb = NULL;
    return;
  }
  if (INDEX(pmcsc, pccb->targetId) >= maxTargets) {
    pccb->ccbStatus  = tiIOFailed;
    pccb->scsiStatus = tiDetailNoLogin;
    agtiapi_FreeCCB(pmcsc, pccb);
    ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL
    xpt_done(ccb);
    pccb->ccb = NULL;
    return;
  }
  pDev = &pmcsc->pDevList[INDEX(pmcsc, pccb->targetId)];

#if 1
  if ((pmcsc->flags & EDC_DATA) && (pDev->flags & EDC_DATA)) {
    /*
     * EDC support:
     *
     * Possible command supported -
     * READ_6, READ_10, READ_12, READ_16, READ_LONG, READ_BUFFER,
     * READ_DEFECT_DATA, etc.
     * WRITE_6, WRITE_10, WRITE_12, WRITE_16, WRITE_LONG, WRITE_LONG2,
     * WRITE_BUFFER, WRITE_VERIFY, WRITE_VERIFY_12, etc.
     *
     * Do some data length adjustment and set chip operation instruction.
     */
    switch (CDB[0]) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
      //  BUG_ON(pccb->tiSuperScsiRequest.flags & TI_SCSI_INITIATOR_ENCRYPT);
#ifdef AGTIAPI_TEST_DIF
      pccb->tiSuperScsiRequest.flags |= TI_SCSI_INITIATOR_DIF;
#endif
      pccb->flags |= EDC_DATA;
#ifdef TEST_VERIFY_AND_FORWARD
      pccb->tiSuperScsiRequest.Dif.flags =
        DIF_VERIFY_FORWARD | DIF_UDT_REF_BLOCK_COUNT;
      if(pDev->sector_size == 520) {
        pScsiCmnd->expDataLength += (pccb->dataLen / 512) * 8;
      } else if(pDev->sector_size == 4104) {
        pScsiCmnd->expDataLength += (pccb->dataLen / 4096) * 8;
      }
#else
#ifdef AGTIAPI_TEST_DIF
      pccb->tiSuperScsiRequest.Dif.flags =
        DIF_VERIFY_DELETE | DIF_UDT_REF_BLOCK_COUNT;
#endif
#endif
#ifdef AGTIAPI_TEST_DIF
      switch(pDev->sector_size) {
      case 528:
        pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_520 << 16 );
        break;
      case 4104:
        pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4096 << 16 );
        break;
      case 4168:
        pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4160 << 16 );
        break;
      }
      if(pCard->flags & EDC_DATA_CRC)
        pccb->tiSuperScsiRequest.Dif.flags |= DIF_CRC_VERIFICATION;

      /* Turn on upper 4 bits of UVM */
      pccb->tiSuperScsiRequest.Dif.flags |= 0x03c00000;
#endif
#ifdef AGTIAPI_TEST_DPL
      if(agtiapi_SetupDifPerLA(pCard, pccb, start_lba) < 0) {
        printk(KERN_ERR "SetupDifPerLA Failed.\n");
        cmnd->result = SCSI_HOST(DID_ERROR);
        goto err;
      }
      pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = TRUE;
#endif
#ifdef AGTIAPI_TEST_DIF
      /* Set App Tag */
      pccb->tiSuperScsiRequest.Dif.udtArray[0] = 0xaa;
      pccb->tiSuperScsiRequest.Dif.udtArray[1] = 0xbb;

      /* Set LBA in UDT array */
      if(CDB[0] == READ_6) {
        pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[3];
        pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[2];
        pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[1] & 0x1f;
        pccb->tiSuperScsiRequest.Dif.udtArray[5] = 0;
      } else if(CDB[0] == READ_10 || CDB[0] == READ_12) {
        pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5];
        pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4];
        pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3];
        pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2];
      } else if(CDB[0] == READ_16) {
        pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[9];
        pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[8];
        pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[7];
        pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[6];
        /* Note: 32 bits lost */
      }
#endif
      break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
      //  BUG_ON(pccb->tiSuperScsiRequest.flags & TI_SCSI_INITIATOR_ENCRYPT);
      pccb->flags |= EDC_DATA;
#ifdef AGTIAPI_TEST_DIF
      pccb->tiSuperScsiRequest.flags |= TI_SCSI_INITIATOR_DIF;
      pccb->tiSuperScsiRequest.Dif.flags =
        DIF_INSERT | DIF_UDT_REF_BLOCK_COUNT;
      switch(pDev->sector_size) {
      case 528:
        pccb->tiSuperScsiRequest.Dif.flags |= (DIF_BLOCK_SIZE_520 << 16);
        break;
      case 4104:
        pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4096 << 16 );
        break;
      case 4168:
        pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4160 << 16 );
        break;
      }

      /* Turn on upper 4 bits of UUM */
      pccb->tiSuperScsiRequest.Dif.flags |= 0xf0000000;
#endif
#ifdef AGTIAPI_TEST_DPL
      if(agtiapi_SetupDifPerLA(pCard, pccb, start_lba) < 0) {
        printk(KERN_ERR "SetupDifPerLA Failed.\n");
        cmnd->result = SCSI_HOST(DID_ERROR);
        goto err;
      }
      pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = TRUE;
#endif
#ifdef AGTIAPI_TEST_DIF
      /* Set App Tag */
      pccb->tiSuperScsiRequest.Dif.udtArray[0] = 0xaa;
      pccb->tiSuperScsiRequest.Dif.udtArray[1] = 0xbb;

      /* Set LBA in UDT array */
      if(CDB[0] == WRITE_6) {
        pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[3];
        pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[2];
        pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[1] & 0x1f;
      } else if(CDB[0] == WRITE_10 || CDB[0] == WRITE_12) {
        pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5];
        pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4];
        pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3];
        pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2];
      } else if(CDB[0] == WRITE_16) {
        pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5];
        pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4];
        pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3];
        pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2];
        /* Note: 32 bits lost */
      }
#endif
      break;
    }
  }
#endif /* end of DIF */

  if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
    switch(csio->tag_action) {
    case MSG_HEAD_OF_Q_TAG:
      pScsiCmnd->taskAttribute = TASK_HEAD_OF_QUEUE;
      break;
    case MSG_ACA_TASK:
      pScsiCmnd->taskAttribute = TASK_ACA;
      break;
    case MSG_ORDERED_Q_TAG:
      pScsiCmnd->taskAttribute = TASK_ORDERED;
      break;
    case MSG_SIMPLE_Q_TAG: /* fall through */
    default:
      pScsiCmnd->taskAttribute = TASK_SIMPLE;
      break;
    }
  }

  if (pccb->tiSuperScsiRequest.agSgl1.len != 0 && pccb->dataLen != 0) {
    /* should be just before start IO */
    bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op);
  }

  /*
   * If assigned pDevHandle is not available
   * then there is no need to send it to StartIO()
   */
  if (pccb->targetId < 0 || pccb->targetId >= maxTargets) {
    pccb->ccbStatus  = tiIOFailed;
    pccb->scsiStatus = tiDetailNoLogin;
    agtiapi_FreeCCB(pmcsc, pccb);
    ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL
    xpt_done(ccb);
    pccb->ccb = NULL;
    return;
  }
  TID = INDEX(pmcsc, pccb->targetId);
  if ((TID >= pmcsc->devDiscover) ||
      !(pccb->devHandle = pmcsc->pDevList[TID].pDevHandle)) {
    /*
    AGTIAPI_PRINTK( "agtiapi_PrepareSGListCB: not sending ccb devH %p,"
                    " target %d tid %d/%d card %p ERROR pccb %p\n",
                    pccb->devHandle, pccb->targetId, TID,
                    pmcsc->devDiscover, pmcsc, pccb );
    */
    pccb->ccbStatus  = tiIOFailed;
    pccb->scsiStatus = tiDetailNoLogin;
    agtiapi_FreeCCB(pmcsc, pccb);
    ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL
    xpt_done(ccb);
    pccb->ccb = NULL;
    return;
  }
  AGTIAPI_IO( "agtiapi_PrepareSGListCB: send ccb pccb->devHandle %p, "
              "pccb->targetId %d TID %d pmcsc->devDiscover %d card %p\n",
              pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover,
              pmcsc );
#ifdef HIALEAH_ENCRYPTION
  if(pmcsc->encrypt && io_is_encryptable) {
    agtiapi_SetupEncryptedIO(pmcsc, pccb, start_lba);
  } else{
    io_is_encryptable = 0;
    pccb->tiSuperScsiRequest.flags = 0;
  }
#endif
  // put the request in send queue
  agtiapi_QueueCCB( pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail
                    AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb );
  agtiapi_StartIO(pmcsc);
  return;
}

/******************************************************************************
agtiapi_StartIO()
Purpose:
  Send IO request down for processing.
Parameters:
  (struct agtiapi_softc *pmcsc (IN)  Pointer to HBA data structure
Return:
Note:
  Drains the ccbSendHead/ccbSendTail queue and hands each CCB to the
  low-level layer.  (Function continues beyond this chunk.)
******************************************************************************/
STATIC void agtiapi_StartIO( struct agtiapi_softc *pmcsc )
{
  ccb_t *pccb;
  int TID;
  ag_device_t *targ;

  AGTIAPI_IO( "agtiapi_StartIO: start\n" );

  AG_LOCAL_LOCK( &pmcsc->sendLock );
  pccb = pmcsc->ccbSendHead;

  /* if link is down, do nothing */
  if ((pccb == NULL) || pmcsc->flags & AGTIAPI_RESET) {
    AG_LOCAL_UNLOCK( &pmcsc->sendLock );
    AGTIAPI_PRINTK( "agtiapi_StartIO: goto ext\n" );
    goto ext;
  }

  /* NOTE(review): TID/targ computed here appear unused before this chunk
     ends — presumably consumed later in the function; verify. */
  if (pmcsc != NULL && pccb->targetId >= 0 && pccb->targetId < maxTargets) {
    TID = INDEX(pmcsc, pccb->targetId);
    targ = &pmcsc->pDevList[TID];
  }

  /* clear send queue */
  pmcsc->ccbSendHead = NULL;
  pmcsc->ccbSendTail = NULL;
  AG_LOCAL_UNLOCK( &pmcsc->sendLock );

  /* send all ccbs down */
  while (pccb) {
    pccb_t pccb_next;
    U32 status;

    pccb_next = pccb->pccbNext;
    pccb->pccbNext = NULL;

    if (!pccb->ccb) {
      AGTIAPI_PRINTK( "agtiapi_StartIO: pccb->ccb is NULL ERROR!\n" );
      pccb = pccb_next;
      continue;
    }
    AG_IO_DUMPCCB( pccb );

    if (!pccb->devHandle) {
      agtiapi_DumpCCB( pccb );
      AGTIAPI_PRINTK( "agtiapi_StartIO: ccb NULL device ERROR!\n" );
      pccb = pccb_next;
      continue;
    }
AGTIAPI_IO( "agtiapi_StartIO: ccb %p retry %d\n", pccb, pccb->retryCount ); #ifndef ABORT_TEST if( !pccb->devHandle || !pccb->devHandle->osData || /* in rmmod case */ !(((ag_device_t *)(pccb->devHandle->osData))->flags & ACTIVE)) { AGTIAPI_PRINTK( "agtiapi_StartIO: device %p not active! ERROR\n", pccb->devHandle ); if( pccb->devHandle ) { AGTIAPI_PRINTK( "agtiapi_StartIO: device not active detail" " -- osData:%p\n", pccb->devHandle->osData ); if( pccb->devHandle->osData ) { AGTIAPI_PRINTK( "agtiapi_StartIO: more device not active detail" " -- active flag:%d\n", ( (ag_device_t *) (pccb->devHandle->osData))->flags & ACTIVE ); } } pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_Done( pmcsc, pccb ); pccb = pccb_next; continue; } #endif #ifdef FAST_IO_TEST status = agtiapi_FastIOTest( pmcsc, pccb ); #else status = tiINISuperIOStart( &pmcsc->tiRoot, &pccb->tiIORequest, pccb->devHandle, &pccb->tiSuperScsiRequest, (void *)&pccb->tdIOReqBody, tiInterruptContext ); #endif switch( status ) { case tiSuccess: /* static int squelchCount = 0; if ( 200000 == squelchCount++ ) // squelch prints { AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart stat tiSuccess %p\n", pccb ); squelchCount = 0; // reset count } */ break; case tiDeviceBusy: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiDeviceBusy %p\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiDeviceBusy " ); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDeviceBusy; agtiapi_Done(pmcsc, pccb); break; case tiBusy: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiBusy %p\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiBusy " ); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiBusy; agtiapi_Done(pmcsc, pccb); break; case tiIONoDevice: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiNoDevice %p " "ERROR\n", pccb->ccb ); #ifdef 
LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart invalid device handle " ); #endif #ifndef ABORT_TEST /* return command back to OS due to no device available */ ((ag_device_t *)(pccb->devHandle->osData))->flags &= ~ACTIVE; pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_Done(pmcsc, pccb); #else /* for short cable pull, we want IO retried - 3-18-2005 */ agtiapi_QueueCCB(pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb); #endif break; case tiError: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status tiError %p\n", pccb->ccb); #ifdef LOGEVENT agtiapi_LogEvent(pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiError "); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailOtherError; agtiapi_Done(pmcsc, pccb); break; default: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status default %x %p\n", status, pccb->ccb); #ifdef LOGEVENT agtiapi_LogEvent(pmcsc, IOCTL_EVT_SEV_ERROR, 0, agNULL, 0, "tiINIIOStart unexpected status "); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailOtherError; agtiapi_Done(pmcsc, pccb); } pccb = pccb_next; } ext: /* some IO requests might have been completed */ AG_GET_DONE_PCCB(pccb, pmcsc); return; } /****************************************************************************** agtiapi_StartSMP() Purpose: Send SMP request down for processing. 
Parameters:
  (struct agtiapi_softc *pmcsc (IN)  Pointer to HBA data structure
Return:
Note:
******************************************************************************/
STATIC void agtiapi_StartSMP(struct agtiapi_softc *pmcsc)
{
  ccb_t *pccb;

  AGTIAPI_PRINTK("agtiapi_StartSMP: start\n");

  AG_LOCAL_LOCK(&pmcsc->sendSMPLock);
  pccb = pmcsc->smpSendHead;

  /* if link is down, do nothing */
  if ((pccb == NULL) || pmcsc->flags & AGTIAPI_RESET)
  {
    AG_LOCAL_UNLOCK(&pmcsc->sendSMPLock);
    AGTIAPI_PRINTK("agtiapi_StartSMP: goto ext\n");
    goto ext;
  }

  /* clear send queue (drained under the lock, walked outside it) */
  pmcsc->smpSendHead = NULL;
  pmcsc->smpSendTail = NULL;
  AG_LOCAL_UNLOCK(&pmcsc->sendSMPLock);

  /* send all ccbs down */
  while (pccb)
  {
    pccb_t pccb_next;
    U32    status;

    pccb_next = pccb->pccbNext;
    pccb->pccbNext = NULL;

    if (!pccb->ccb)
    {
      AGTIAPI_PRINTK("agtiapi_StartSMP: pccb->ccb is NULL ERROR!\n");
      pccb = pccb_next;
      continue;
    }

    if (!pccb->devHandle)
    {
      AGTIAPI_PRINTK("agtiapi_StartSMP: ccb NULL device ERROR!\n");
      pccb = pccb_next;
      continue;
    }
    pccb->flags |= TAG_SMP; // mark as SMP for later tracking
    AGTIAPI_PRINTK( "agtiapi_StartSMP: ccb %p retry %d\n",
                    pccb, pccb->retryCount );
    status = tiINISMPStart( &pmcsc->tiRoot,
                            &pccb->tiIORequest,
                            pccb->devHandle,
                            &pccb->tiSMPFrame,
                            (void *)&pccb->tdIOReqBody,
                            tiInterruptContext);

    switch (status)
    {
    case tiSuccess:
      break;
    case tiBusy:
      AGTIAPI_PRINTK("agtiapi_StartSMP: tiINISMPStart status tiBusy %p\n",
                     pccb->ccb);
      /* pending ccb back to send queue */
      agtiapi_QueueCCB(pmcsc, &pmcsc->smpSendHead, &pmcsc->smpSendTail
                       AG_CARD_LOCAL_LOCK(&pmcsc->sendSMPLock), pccb);
      break;
    case tiError:
      /* NOTE(review): message says "StartIO"/"tiINIIOStart" -- copy-paste
       * from agtiapi_StartIO; left untouched because it is runtime output */
      AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status tiError %p\n",
                     pccb->ccb);
      pccb->ccbStatus = tiSMPFailed;
      agtiapi_SMPDone(pmcsc, pccb);
      break;
    default:
      AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status default %x %p\n",
                     status, pccb->ccb);
      pccb->ccbStatus = tiSMPFailed;
      agtiapi_SMPDone(pmcsc, pccb);
    }
    pccb = pccb_next;
  }
ext:
  /* some SMP requests might have been completed */
  AG_GET_DONE_SMP_PCCB(pccb, pmcsc);
  return;
}

#if __FreeBSD_version > 901000
/******************************************************************************
agtiapi_PrepareSMPSGList()

Purpose:
  This function prepares scatter-gather list for the given ccb
Parameters:
  struct agtiapi_softc *pmsc (IN)  Pointer to the HBA data structure
  ccb_t *pccb (IN)      A pointer to the driver's own CCB, not CAM's CCB
Return:
  0 - success
  1 - failure
Note:
******************************************************************************/
static int agtiapi_PrepareSMPSGList( struct agtiapi_softc *pmcsc, ccb_t *pccb )
{
  /* Pointer to CAM's ccb */
  union ccb *ccb = pccb->ccb;
  struct ccb_smpio *csmpio = &ccb->smpio;
  struct ccb_hdr *ccbh = &ccb->ccb_h;

  AGTIAPI_PRINTK("agtiapi_PrepareSMPSGList: start\n");
  switch((ccbh->flags & CAM_DATA_MASK))
  {
    case CAM_DATA_PADDR:
    case CAM_DATA_SG_PADDR:
      AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Physical Address not supported\n");
      ccb->ccb_h.status = CAM_REQ_INVALID;
      xpt_done(ccb);
      return tiReject;
    case CAM_DATA_SG:
      /*
       * Currently we do not support Multiple SG list
       * return error for now
       */
      if ( (csmpio->smp_request_sglist_cnt > 1)
           || (csmpio->smp_response_sglist_cnt > 1) )
      {
        AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Multiple SG list not supported\n");
        ccb->ccb_h.status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return tiReject;
      }
  }
  if ( csmpio->smp_request_sglist_cnt != 0 )
  {
    /*
     * Virtual address that needs to translated into
     * one or more physical address ranges.
     */
    int error;
    //AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock));
    AGTIAPI_PRINTK("agtiapi_PrepareSGList: virtual address\n");
    error = bus_dmamap_load( pmcsc->buffer_dmat,
                             pccb->CCB_dmamap,
                             csmpio->smp_request,
                             csmpio->smp_request_len,
                             agtiapi_PrepareSMPSGListCB,
                             pccb,
                             BUS_DMA_NOWAIT /* 0 */ );
    //AG_LOCAL_UNLOCK(&(pmcsc->pCardInfo->pmIOLock));

    if (error == EINPROGRESS)
    {
      /*
       * So as to maintain ordering,
       * freeze the controller queue
       * until our mapping is
       * returned.
*/ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } if( csmpio->smp_response_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK( &(pmcsc->pCardInfo->pmIOLock) ); AGTIAPI_PRINTK( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_response, csmpio->smp_response_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if ( error == EINPROGRESS ) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } else { if ( (csmpio->smp_request_sglist_cnt == 0) && (csmpio->smp_response_sglist_cnt == 0) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: physical address\n" ); pccb->tiSMPFrame.outFrameBuf = (void *)csmpio->smp_request; pccb->tiSMPFrame.outFrameLen = csmpio->smp_request_len; pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len; // 0xFF to be defined agtiapi_PrepareSMPSGListCB( pccb, NULL, 0, 0xAABBCCDD ); } pccb->tiSMPFrame.flag = 0; } return tiSuccess; } #else /****************************************************************************** agtiapi_PrepareSMPSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSMPSGList( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { /* Pointer to CAM's ccb */ union ccb *ccb = pccb->ccb; struct 
ccb_smpio *csmpio = &ccb->smpio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_PRINTK("agtiapi_PrepareSMPSGList: start\n"); if (ccbh->flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Physical Address " "not supported\n" ); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); - return tiReject;; + return tiReject; } if (ccbh->flags & CAM_SCATTER_VALID) { /* * Currently we do not support Multiple SG list * return error for now */ if ( (csmpio->smp_request_sglist_cnt > 1) || (csmpio->smp_response_sglist_cnt > 1) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Multiple SG list " "not supported\n" ); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); - return tiReject;; + return tiReject; } if ( csmpio->smp_request_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_PRINTK("agtiapi_PrepareSGList: virtual address\n"); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_request, csmpio->smp_request_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK(&(pmcsc->pCardInfo->pmIOLock)); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } if( csmpio->smp_response_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. 
*/ int error; //AG_LOCAL_LOCK( &(pmcsc->pCardInfo->pmIOLock) ); AGTIAPI_PRINTK( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_response, csmpio->smp_response_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if ( error == EINPROGRESS ) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } } else { if ( (csmpio->smp_request_sglist_cnt == 0) && (csmpio->smp_response_sglist_cnt == 0) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: physical address\n" ); pccb->tiSMPFrame.outFrameBuf = (void *)csmpio->smp_request; pccb->tiSMPFrame.outFrameLen = csmpio->smp_request_len; pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len; // 0xFF to be defined agtiapi_PrepareSMPSGListCB( pccb, NULL, 0, 0xAABBCCDD ); } pccb->tiSMPFrame.flag = 0; } return tiSuccess; } #endif /****************************************************************************** agtiapi_PrepareSMPSGListCB() Purpose: Callback function for bus_dmamap_load() This fuctions sends IO to LL layer. 
Parameters:
  void *arg (IN)                Pointer to the HBA data structure
  bus_dma_segment_t *segs (IN)  Pointer to dma segment
  int nsegs (IN)                number of dma segment
  int error (IN)                error
Return:
Note:
******************************************************************************/
static void agtiapi_PrepareSMPSGListCB( void *arg,
                                        bus_dma_segment_t *segs,
                                        int nsegs,
                                        int error )
{
  pccb_t                pccb = arg;
  union ccb            *ccb  = pccb->ccb;
  struct agtiapi_softc *pmcsc;
  U32                   TID  = CMND_TO_TARGET(ccb);
  int                   status;
  tiDeviceHandle_t     *tiExpDevHandle;
  tiPortalContext_t    *tiExpPortalContext;
  ag_portal_info_t     *tiExpPortalInfo;

  AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: start, nsegs %d error 0x%x\n",
                  nsegs, error );
  pmcsc = pccb->pmcsc;

  if ( error != tiSuccess )
  {
    /* 0xAABBCCDD is the sentinel passed by agtiapi_PrepareSMPSGList for
       the direct (no-mapping) path -- not a real error */
    if (error == 0xAABBCCDD)
    {
      // do nothing
    }
    else
    {
      AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: error status 0x%x\n",
                      error );
      bus_dmamap_unload( pmcsc->buffer_dmat, pccb->CCB_dmamap );
      bus_dmamap_destroy( pmcsc->buffer_dmat, pccb->CCB_dmamap );
      agtiapi_FreeCCB( pmcsc, pccb );
      /* NOTE(review): completes with CAM_REQ_CMP even on error -- confirm
         this is intentional */
      ccb->ccb_h.status = CAM_REQ_CMP;
      xpt_done( ccb );
      return;
    }
  }

  if ( nsegs > AGTIAPI_MAX_DMA_SEGS )
  {
    AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: over the limit. nsegs %d "
                    "AGTIAPI_MAX_DMA_SEGS %d\n",
                    nsegs, AGTIAPI_MAX_DMA_SEGS );
    bus_dmamap_unload( pmcsc->buffer_dmat, pccb->CCB_dmamap );
    bus_dmamap_destroy( pmcsc->buffer_dmat, pccb->CCB_dmamap );
    agtiapi_FreeCCB( pmcsc, pccb );
    ccb->ccb_h.status = CAM_REQ_CMP;
    xpt_done( ccb );
    return;
  }

  /*
   * If assigned pDevHandle is not available
   * then there is no need to send it to StartIO()
   */
  /* TODO: Add check for deviceType */
  if ( pccb->targetId < 0 || pccb->targetId >= maxTargets )
  {
    agtiapi_FreeCCB( pmcsc, pccb );
    ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
    xpt_done(ccb);
    pccb->ccb = NULL;
    return;
  }
  TID = INDEX( pmcsc, pccb->targetId );
  if ( (TID >= pmcsc->devDiscover) ||
       !(pccb->devHandle = pmcsc->pDevList[TID].pDevHandle) )
  {
    AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: not sending ccb devH %p, "
                    "target %d tid %d/%d "
                    "card %p ERROR pccb %p\n",
                    pccb->devHandle,
                    pccb->targetId,
                    TID,
                    pmcsc->devDiscover,
                    pmcsc,
                    pccb );
    agtiapi_FreeCCB( pmcsc, pccb );
    ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
    xpt_done( ccb );
    pccb->ccb = NULL;
    return;
  }
  /* TODO: add indirect handling */
  /* set the flag correctly based on Indiret SMP request and response */

  AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: send ccb pccb->devHandle %p, "
                  "pccb->targetId %d TID %d pmcsc->devDiscover %d card %p\n",
                  pccb->devHandle,
                  pccb->targetId,
                  TID,
                  pmcsc->devDiscover,
                  pmcsc );
  tiExpDevHandle     = pccb->devHandle;
  tiExpPortalInfo    = pmcsc->pDevList[TID].pPortalInfo;
  tiExpPortalContext = &tiExpPortalInfo->tiPortalContext;
  /* Look for the expander associated with the ses device */
  status = tiINIGetExpander( &pmcsc->tiRoot,
                             tiExpPortalContext,
                             pccb->devHandle,
                             &tiExpDevHandle );

  if ( status != tiSuccess )
  {
    AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: Error getting Expander "
                    "device\n" );
    agtiapi_FreeCCB( pmcsc, pccb );
    ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
    xpt_done( ccb );
    pccb->ccb = NULL;
    return;
  }

  /* this is expander device */
  pccb->devHandle = tiExpDevHandle;
  /* put the request in send queue */
  agtiapi_QueueCCB( pmcsc,
&pmcsc->smpSendHead, &pmcsc->smpSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendSMPLock), pccb ); agtiapi_StartSMP( pmcsc ); return; } /****************************************************************************** agtiapi_Done() Purpose: Processing completed ccbs Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_Done(struct agtiapi_softc *pmcsc, ccb_t *pccb) { pccb_t pccb_curr = pccb; pccb_t pccb_next; tiIniScsiCmnd_t *cmnd; union ccb * ccb; AGTIAPI_IO("agtiapi_Done: start\n"); while (pccb_curr) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbNext; if (agtiapi_CheckError(pmcsc, pccb_curr) != 0) { /* send command back and release the ccb */ cmnd = &pccb_curr->tiSuperScsiRequest.scsiCmnd; if (cmnd->cdb[0] == RECEIVE_DIAGNOSTIC) { AGTIAPI_PRINTK("agtiapi_Done: RECEIVE_DIAG pg %d id %d cmnd %p pccb " "%p\n", cmnd->cdb[2], pccb_curr->targetId, cmnd, pccb_curr); } CMND_DMA_UNMAP(pmcsc, ccb); /* send the request back to the CAM */ ccb = pccb_curr->ccb; agtiapi_FreeCCB(pmcsc, pccb_curr); xpt_done(ccb); } pccb_curr = pccb_next; } return; } /****************************************************************************** agtiapi_SMPDone() Purpose: Processing completed ccbs Parameters: struct agtiapi_softc *pmcsc (IN) Ponter to HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_SMPDone(struct agtiapi_softc *pmcsc, ccb_t *pccb) { pccb_t pccb_curr = pccb; pccb_t pccb_next; union ccb * ccb; AGTIAPI_PRINTK("agtiapi_SMPDone: start\n"); while (pccb_curr) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbNext; if (agtiapi_CheckSMPError(pmcsc, pccb_curr) != 0) { CMND_DMA_UNMAP(pmcsc, ccb); /* send the request 
back to the CAM */ ccb = pccb_curr->ccb; agtiapi_FreeSMPCCB(pmcsc, pccb_curr); xpt_done(ccb); } pccb_curr = pccb_next; } AGTIAPI_PRINTK("agtiapi_SMPDone: Done\n"); return; } /****************************************************************************** agtiapi_hexdump() Purpose: Utility function for dumping in hex Parameters: const char *ptitle (IN) A string to be printed bit8 *pbuf (IN) A pointer to a buffer to be printed. int len (IN) The lengther of the buffer Return: Note: ******************************************************************************/ void agtiapi_hexdump(const char *ptitle, bit8 *pbuf, int len) { int i; AGTIAPI_PRINTK("%s - hexdump(len=%d):\n", ptitle, (int)len); if (!pbuf) { AGTIAPI_PRINTK("pbuf is NULL\n"); return; } for (i = 0; i < len; ) { if (len - i > 4) { AGTIAPI_PRINTK( " 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n", pbuf[i], pbuf[i+1], pbuf[i+2], pbuf[i+3] ); i += 4; } else { AGTIAPI_PRINTK(" 0x%02x,", pbuf[i]); i++; } } AGTIAPI_PRINTK("\n"); } /****************************************************************************** agtiapi_CheckError() Purpose: Processes status pertaining to the ccb -- whether it was completed successfully, aborted, or error encountered. 
Parameters:
  ag_card_t *pCard (IN)  Pointer to HBA data structure
  ccb_t *pccd (IN)  A pointer to the driver's own CCB, not CAM's CCB
Return:
  0 - the command retry is required
  1 - the command process is completed
Note:
******************************************************************************/
STATIC U32 agtiapi_CheckError(struct agtiapi_softc *pmcsc, ccb_t *pccb)
{
  ag_device_t      *pDevice;
  // union ccb * ccb = pccb->ccb;
  union ccb * ccb;
  int is_error, TID;

  if (pccb == NULL) {
    return 0;
  }
  ccb = pccb->ccb;
  AGTIAPI_IO("agtiapi_CheckError: start\n");
  if (ccb == NULL)
  {
    /* shouldn't be here but just in case we do */
    AGTIAPI_PRINTK("agtiapi_CheckError: CCB orphan = %p ERROR\n", pccb);
    agtiapi_FreeCCB(pmcsc, pccb);
    return 0;
  }

  /* validate target id and look up the device entry */
  is_error = 1;
  pDevice = NULL;
  if (pmcsc != NULL && pccb->targetId >= 0 && pccb->targetId < maxTargets)
  {
    if (pmcsc->pWWNList != NULL)
    {
      TID = INDEX(pmcsc, pccb->targetId);
      if (TID < maxTargets)
      {
        pDevice = &pmcsc->pDevList[TID];
        if (pDevice != NULL)
        {
          is_error = 0;
        }
      }
    }
  }
  if (is_error)
  {
    AGTIAPI_PRINTK("agtiapi_CheckError: pDevice == NULL\n");
    agtiapi_FreeCCB(pmcsc, pccb);
    return 0;
  }

  /* SCSI status */
  ccb->csio.scsi_status = pccb->scsiStatus;

  if(pDevice->CCBCount > 0){
    atomic_subtract_int(&pDevice->CCBCount,1);
  }
  /* release the SIM queue if we previously froze it */
  AG_LOCAL_LOCK(&pmcsc->freezeLock);
  if(pmcsc->freezeSim == agTRUE)
  {
    pmcsc->freezeSim = agFALSE;
    xpt_release_simq(pmcsc->sim, 1);
  }
  AG_LOCAL_UNLOCK(&pmcsc->freezeLock);

  /* translate LL-layer completion status into a CAM status */
  switch (pccb->ccbStatus)
  {
  case tiIOSuccess:
    AGTIAPI_IO("agtiapi_CheckError: tiIOSuccess pccb %p\n", pccb);
    /* CAM status */
    if (pccb->scsiStatus == SCSI_STATUS_OK)
    {
      ccb->ccb_h.status = CAM_REQ_CMP;
    }
    else
      if (pccb->scsiStatus == SCSI_TASK_ABORTED)
      {
        ccb->ccb_h.status = CAM_REQ_ABORTED;
      }
      else
      {
        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
      }
    if (ccb->csio.scsi_status == SCSI_CHECK_CONDITION)
    {
      ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    break;

  case tiIOOverRun:
    AGTIAPI_PRINTK("agtiapi_CheckError: tiIOOverRun pccb %p\n", pccb);
    /* resid is ignored for this condition */
    ccb->csio.resid = 0;
    ccb->ccb_h.status = CAM_DATA_RUN_ERR;
    break;
  case tiIOUnderRun:
    AGTIAPI_PRINTK("agtiapi_CheckError: tiIOUnderRun pccb %p\n", pccb);
    /* NOTE(review): scsiStatus carries the residual count in the
       underrun case -- confirm against the LL layer contract */
    ccb->csio.resid = pccb->scsiStatus;
    ccb->ccb_h.status = CAM_REQ_CMP;
    ccb->csio.scsi_status = SCSI_STATUS_OK;
    break;

  case tiIOFailed:
    AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n",
                    pccb, pccb->scsiStatus, pccb->targetId );
    if (pccb->scsiStatus == tiDeviceBusy)
    {
      AGTIAPI_IO( "agtiapi_CheckError: pccb %p tiIOFailed - tiDetailBusy\n",
                  pccb );
      ccb->ccb_h.status &= ~CAM_STATUS_MASK;
      ccb->ccb_h.status |= CAM_REQUEUE_REQ;
      if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0)
      {
        ccb->ccb_h.status |= CAM_DEV_QFRZN;
        xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
      }
    }
    else if(pccb->scsiStatus == tiBusy)
    {
      AG_LOCAL_LOCK(&pmcsc->freezeLock);
      if(pmcsc->freezeSim == agFALSE)
      {
        pmcsc->freezeSim = agTRUE;
        xpt_freeze_simq(pmcsc->sim, 1);
      }
      AG_LOCAL_UNLOCK(&pmcsc->freezeLock);
      ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
      ccb->ccb_h.status |= CAM_REQUEUE_REQ;
    }
    else if (pccb->scsiStatus == tiDetailNoLogin)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailNoLogin ERROR\n", pccb );
      ccb->ccb_h.status = CAM_DEV_NOT_THERE;
    }
    else if (pccb->scsiStatus == tiDetailNotValid)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailNotValid ERROR\n", pccb );
      ccb->ccb_h.status = CAM_REQ_INVALID;
    }
    else if (pccb->scsiStatus == tiDetailAbortLogin)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailAbortLogin ERROR\n", pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
    }
    else if (pccb->scsiStatus == tiDetailAbortReset)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailAbortReset ERROR\n", pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
    }
    else if (pccb->scsiStatus == tiDetailAborted)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailAborted ERROR\n", pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
    }
    else if (pccb->scsiStatus == tiDetailOtherError)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailOtherError ERROR\n", pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
    }
    break;
  case tiIODifError:
    AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n",
                    pccb, pccb->scsiStatus, pccb->targetId );
    if (pccb->scsiStatus == tiDetailDifAppTagMismatch)
    {
      AGTIAPI_IO( "agtiapi_CheckError: pccb %p tiIOFailed - "
                  "tiDetailDifAppTagMismatch\n", pccb );
      ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    }
    else if (pccb->scsiStatus == tiDetailDifRefTagMismatch)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailDifRefTagMismatch\n", pccb );
      ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    }
    else if (pccb->scsiStatus == tiDetailDifCrcMismatch)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - "
                      "tiDetailDifCrcMismatch\n", pccb );
      ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    }
    break;
#ifdef HIALEAH_ENCRYPTION
  case tiIOEncryptError:
    AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n",
                    pccb, pccb->scsiStatus, pccb->targetId );
    if (pccb->scsiStatus == tiDetailDekKeyCacheMiss)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: %s: pccb %p tiIOFailed - "
                      "tiDetailDekKeyCacheMiss ERROR\n",
                      __FUNCTION__, pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
      agtiapi_HandleEncryptedIOFailure(pDevice, pccb);
    }
    else if (pccb->scsiStatus == tiDetailDekIVMismatch)
    {
      AGTIAPI_PRINTK( "agtiapi_CheckError: %s: pccb %p tiIOFailed - "
                      "tiDetailDekIVMismatch ERROR\n", __FUNCTION__, pccb );
      ccb->ccb_h.status = CAM_REQ_ABORTED;
      agtiapi_HandleEncryptedIOFailure(pDevice, pccb);
    }
    break;
#endif
  default:
    AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOdefault %d id %d ERROR\n",
                    pccb, pccb->ccbStatus, pccb->targetId );
    ccb->ccb_h.status = CAM_REQ_CMP_ERR;
    break;
  }

  return 1;
}

/******************************************************************************
agtiapi_SMPCheckError()

Purpose:
  Processes status pertaining to the ccb -- whether it was
  completed successfully, aborted, or error encountered.
Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure ccb_t *pccd (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - the command retry is required 1 - the command process is completed Note: ******************************************************************************/ STATIC U32 agtiapi_CheckSMPError( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { union ccb * ccb = pccb->ccb; AGTIAPI_PRINTK("agtiapi_CheckSMPError: start\n"); if (!ccb) { /* shouldn't be here but just in case we do */ AGTIAPI_PRINTK( "agtiapi_CheckSMPError: CCB orphan = %p ERROR\n", pccb ); agtiapi_FreeSMPCCB(pmcsc, pccb); return 0; } switch (pccb->ccbStatus) { case tiSMPSuccess: AGTIAPI_PRINTK( "agtiapi_CheckSMPError: tiSMPSuccess pccb %p\n", pccb ); /* CAM status */ ccb->ccb_h.status = CAM_REQ_CMP; break; case tiSMPFailed: AGTIAPI_PRINTK( "agtiapi_CheckSMPError: tiSMPFailed pccb %p\n", pccb ); /* CAM status */ ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; default: AGTIAPI_PRINTK( "agtiapi_CheckSMPError: pccb %p tiSMPdefault %d " "id %d ERROR\n", pccb, pccb->ccbStatus, pccb->targetId ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } return 1; } /****************************************************************************** agtiapi_HandleEncryptedIOFailure(): Purpose: Parameters: Return: Note: Currently not used. ******************************************************************************/ void agtiapi_HandleEncryptedIOFailure(ag_device_t *pDev, ccb_t *pccb) { AGTIAPI_PRINTK("agtiapi_HandleEncryptedIOFailure: start\n"); return; } /****************************************************************************** agtiapi_Retry() Purpose: Retry a ccb. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: Currently not used. 
******************************************************************************/
STATIC void agtiapi_Retry(struct agtiapi_softc *pmcsc, ccb_t *pccb)
{
  pccb->retryCount++;
  pccb->flags      = ACTIVE | AGTIAPI_RETRY;
  pccb->ccbStatus  = 0;
  pccb->scsiStatus = 0;
  pccb->startTime  = ticks;

  AGTIAPI_PRINTK( "agtiapi_Retry: start\n" );
  AGTIAPI_PRINTK( "agtiapi_Retry: ccb %p retry %d flgs x%x\n", pccb,
                  pccb->retryCount, pccb->flags );

  agtiapi_QueueCCB(pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail
                   AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb);
  return;
}

/******************************************************************************
agtiapi_DumpCCB()

Purpose:
  Dump CCB for debuging
Parameters:
  ccb_t *pccb (IN)  A pointer to the driver's own CCB, not CAM's CCB
Return:
Note:
******************************************************************************/
STATIC void agtiapi_DumpCCB(ccb_t *pccb)
{
  AGTIAPI_PRINTK("agtiapi_DumpCCB: pccb %p, devHandle %p, tid %d, lun %d\n",
                 pccb,
                 pccb->devHandle,
                 pccb->targetId,
                 pccb->lun);
  AGTIAPI_PRINTK("flag 0x%x, add_mode 0x%x, ccbStatus 0x%x, scsiStatus 0x%x\n",
                 pccb->flags,
                 pccb->addrMode,
                 pccb->ccbStatus,
                 pccb->scsiStatus);
  AGTIAPI_PRINTK("scsi comand = 0x%x, numSgElements = %d\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[0],
                 pccb->numSgElements);
  AGTIAPI_PRINTK("dataLen = 0x%x, sens_len = 0x%x\n",
                 pccb->dataLen,
                 pccb->senseLen);
  AGTIAPI_PRINTK("tiSuperScsiRequest:\n");
  AGTIAPI_PRINTK("scsiCmnd: expDataLength 0x%x, taskAttribute 0x%x\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.expDataLength,
                 pccb->tiSuperScsiRequest.scsiCmnd.taskAttribute);
  AGTIAPI_PRINTK("cdb[0] = 0x%x, cdb[1] = 0x%x, cdb[2] = 0x%x, cdb[3] = 0x%x\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[0],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[1],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[2],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[3]);
  AGTIAPI_PRINTK("cdb[4] = 0x%x, cdb[5] = 0x%x, cdb[6] = 0x%x, cdb[7] = 0x%x\n",
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[4],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[5],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[6],
                 pccb->tiSuperScsiRequest.scsiCmnd.cdb[7]);
  AGTIAPI_PRINTK( "cdb[8] = 0x%x, cdb[9] = 0x%x, cdb[10] = 0x%x, "
                  "cdb[11] = 0x%x\n",
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[8],
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[9],
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[10],
                  pccb->tiSuperScsiRequest.scsiCmnd.cdb[11] );
  AGTIAPI_PRINTK("agSgl1: upper 0x%x, lower 0x%x, len 0x%x, type %d\n",
                 pccb->tiSuperScsiRequest.agSgl1.upper,
                 pccb->tiSuperScsiRequest.agSgl1.lower,
                 pccb->tiSuperScsiRequest.agSgl1.len,
                 pccb->tiSuperScsiRequest.agSgl1.type);
}

/******************************************************************************
agtiapi_eh_HostReset()

Purpose:
  A new error handler of Host Reset command.
Parameters:
  scsi_cmnd *cmnd (IN)  Pointer to a command to the HBA to be reset
Return:
  SUCCESS - success
  FAILED  - fail
Note:
******************************************************************************/
int agtiapi_eh_HostReset( struct agtiapi_softc *pmcsc, union ccb *cmnd )
{
  AGTIAPI_PRINTK( "agtiapi_eh_HostReset: ccb pointer %p\n",
                  cmnd );

  if( cmnd == NULL )
  {
    printf( "agtiapi_eh_HostReset: null command, skipping reset.\n" );
    return tiInvalidHandle;
  }

#ifdef LOGEVENT
  agtiapi_LogEvent( pmcsc,
                    IOCTL_EVT_SEV_INFORMATIONAL,
                    0,
                    agNULL,
                    0,
                    "agtiapi_eh_HostReset! " );
#endif

  return agtiapi_DoSoftReset( pmcsc );
}

/******************************************************************************
agtiapi_QueueCCB()

Purpose:
  Put ccb in ccb queue at the tail
Parameters:
  struct agtiapi_softc *pmcsc (IN)  Pointer to HBA data structure
  pccb_t *phead (IN)  Double pointer to ccb queue head
  pccb_t *ptail (IN)  Double pointer to ccb queue tail
  ccb_t *pccb (IN)  Poiner to a ccb to be queued
Return:
Note:
  Put the ccb to the tail of queue
******************************************************************************/
STATIC void agtiapi_QueueCCB( struct agtiapi_softc *pmcsc,
                              pccb_t *phead,
                              pccb_t *ptail,
#ifdef AGTIAPI_LOCAL_LOCK
                              struct mtx *mutex,
#endif
                              ccb_t *pccb )
{
  AGTIAPI_IO( "agtiapi_QueueCCB: start\n" );
  AGTIAPI_IO( "agtiapi_QueueCCB: %p to %p\n", pccb, phead );
  if (phead == NULL || ptail == NULL)
  {
    panic( "agtiapi_QueueCCB: phead %p ptail %p", phead, ptail );
  }
  pccb->pccbNext = NULL;
  AG_LOCAL_LOCK( mutex );
  if (*phead == NULL)
  {
    //WARN_ON(*ptail != NULL); /* critical, just get more logs */
    *phead = pccb;
  }
  else
  {
    //WARN_ON(*ptail == NULL); /* critical, just get more logs */
    if (*ptail)
      (*ptail)->pccbNext = pccb;
  }
  *ptail = pccb;
  AG_LOCAL_UNLOCK( mutex );
  return;
}

/******************************************************************************
agtiapi_QueueSMP()

Purpose:
  Allocate a driver CCB for an incoming CAM SMP request and hand it to
  agtiapi_PrepareSMPSGList().
Parameters:
Return:
Note:
  (header previously mislabeled "agtiapi_QueueCCB()" -- copy/paste)
******************************************************************************/
static int agtiapi_QueueSMP(struct agtiapi_softc *pmcsc, union ccb * ccb)
{
  pccb_t pccb = agNULL; /* call dequeue */
  int        status = tiSuccess;
  int        targetID = xpt_path_target_id(ccb->ccb_h.path);

  AGTIAPI_PRINTK("agtiapi_QueueSMP: start\n");

  /* get a ccb */
  if ((pccb = agtiapi_GetCCB(pmcsc)) == NULL)
  {
    AGTIAPI_PRINTK("agtiapi_QueueSMP: GetCCB ERROR\n");
    ccb->ccb_h.status = CAM_REQ_CMP;
    xpt_done(ccb);
    return tiBusy;
  }
  pccb->pmcsc = pmcsc;
  /* initialize Command Control Block (CCB) */
  pccb->targetId = targetID;
  pccb->ccb      = ccb; /* for struct scsi_cmnd */

  status = agtiapi_PrepareSMPSGList(pmcsc, pccb);

  if (status != tiSuccess)
  {
    AGTIAPI_PRINTK("agtiapi_QueueSMP: agtiapi_PrepareSMPSGList failure\n");
    agtiapi_FreeCCB(pmcsc, pccb);
    if (status == tiReject)
    {
      ccb->ccb_h.status = CAM_REQ_INVALID;
    }
    else
    {
      ccb->ccb_h.status = CAM_REQ_CMP;
    }
    xpt_done(ccb);
    return tiError;
  }

  return status;
}

/******************************************************************************
agtiapi_SetLunField()

Purpose:
  Set LUN field based on different address mode
Parameters:
  ccb_t *pccb (IN)  A pointer to the driver's own CCB, not CAM's CCB
Return:
Note:
******************************************************************************/
void agtiapi_SetLunField(ccb_t *pccb)
{
  U08 *pchar;

  pchar = (U08 *)&pccb->tiSuperScsiRequest.scsiCmnd.lun;

//  AGTIAPI_PRINTK("agtiapi_SetLunField: start\n");

  switch (pccb->addrMode)
  {
  case AGTIAPI_PERIPHERAL:
    *pchar++ = 0;
    *pchar   = (U08)pccb->lun;
    break;
  case AGTIAPI_VOLUME_SET:
    *pchar++ = (AGTIAPI_VOLUME_SET << AGTIAPI_ADDRMODE_SHIFT) |
               (U08)((pccb->lun >> 8) & 0x3F);
    *pchar   = (U08)pccb->lun;
    break;
  case AGTIAPI_LUN_ADDR:
    *pchar++ = (AGTIAPI_LUN_ADDR << AGTIAPI_ADDRMODE_SHIFT) |
               pccb->targetId;
    *pchar   = (U08)pccb->lun;
    break;
  }
}

/*****************************************************************************
agtiapi_FreeCCB()

Purpose:
  Free a ccb and put it back to ccbFreeList.
Parameters:
  struct agtiapi_softc *pmcsc (IN)  Pointer to HBA data structure
  pccb_t pccb (IN)                  A pointer to the driver's own CCB, not
                                    CAM's CCB
Returns:
Note:
  Also syncs/unloads the data DMA map and clears the CCB fields so the
  entry is pristine when it is next taken from the free list.
*****************************************************************************/
STATIC void agtiapi_FreeCCB(struct agtiapi_softc *pmcsc, pccb_t pccb)
{
  union ccb *ccb = pccb->ccb;
  bus_dmasync_op_t op;

  AG_LOCAL_LOCK(&pmcsc->ccbLock);
  AGTIAPI_IO( "agtiapi_FreeCCB: start %p\n", pccb );

#ifdef AGTIAPI_TEST_EPL
  tiEncrypt_t *encrypt;
#endif

  agtiapi_DumpCDB( "agtiapi_FreeCCB", pccb );

  if (pccb->sgList != agNULL)
  {
    AGTIAPI_IO( "agtiapi_FreeCCB: pccb->sgList is NOT null\n" );
  }
  else
  {
    AGTIAPI_PRINTK( "agtiapi_FreeCCB: pccb->sgList is null\n" );
  }

  /* set data transfer direction */
  if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
  {
    op = BUS_DMASYNC_POSTWRITE;
  }
  else
  {
    op = BUS_DMASYNC_POSTREAD;
  }

  if (pccb->numSgElements == 0)
  {
    // do nothing -- no data buffer was ever mapped for this CCB
    AGTIAPI_IO( "agtiapi_FreeCCB: numSgElements zero\n" );
  }
  else if (pccb->numSgElements == 1)
  {
    AGTIAPI_IO( "agtiapi_FreeCCB: numSgElements is one\n" );
    //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD
    bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op);
    bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap);
  }
  else
  {
    AGTIAPI_PRINTK( "agtiapi_FreeCCB: numSgElements 2 or higher \n" );
    //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD
    bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op);
    bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap);
  }

#ifdef AGTIAPI_TEST_DPL
  if (pccb->tiSuperScsiRequest.Dif.enableDIFPerLA == TRUE) {
    if(pccb->dplPtr)
        memset( (char *) pccb->dplPtr,
                0,
                MAX_DPL_REGIONS * sizeof(dplaRegion_t) );
    pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = FALSE;
    pccb->tiSuperScsiRequest.Dif.DIFPerLAAddrLo = 0;
    pccb->tiSuperScsiRequest.Dif.DIFPerLAAddrHi = 0;
  }
#endif

#ifdef AGTIAPI_TEST_EPL
  encrypt = &pccb->tiSuperScsiRequest.Encrypt;
  if (encrypt->enableEncryptionPerLA == TRUE)
  {
    encrypt->enableEncryptionPerLA = FALSE;
    encrypt->EncryptionPerLAAddrLo = 0;
    encrypt->EncryptionPerLAAddrHi = 0;
  }
#endif

#ifdef ENABLE_SATA_DIF
  if (pccb->holePtr && pccb->dmaHandleHole)
    pci_free_consistent( pmcsc->pCardInfo->pPCIDev,
                         512,
                         pccb->holePtr,
                         pccb->dmaHandleHole );
  pccb->holePtr = 0;
  pccb->dmaHandleHole = 0;
#endif

  /* Scrub every per-I/O field so the recycled CCB starts clean. */
  pccb->dataLen = 0;
  pccb->retryCount = 0;
  pccb->ccbStatus = 0;
  pccb->scsiStatus = 0;
  pccb->startTime = 0;
  pccb->dmaHandle = 0;
  pccb->numSgElements = 0;
  pccb->tiIORequest.tdData = 0;
  memset((void *)&pccb->tiSuperScsiRequest, 0, AGSCSI_INIT_XCHG_LEN);

#ifdef HIALEAH_ENCRYPTION
  if (pmcsc->encrypt)
    agtiapi_CleanupEncryptedIO(pmcsc, pccb);
#endif

  pccb->flags = 0;
  pccb->ccb = NULL;
  pccb->pccbIO = NULL;
  /* Push onto the singly-linked free list (LIFO), still under ccbLock. */
  pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList;
  pmcsc->ccbFreeList = (caddr_t *)pccb;
  pmcsc->activeCCB--;
  AG_LOCAL_UNLOCK(&pmcsc->ccbLock);
  return;
}

/******************************************************************************
agtiapi_FlushCCBs()

Purpose:
  Flush all in processed ccbs.
Parameters:
  ag_card_t *pCard (IN)  Pointer to HBA data structure
  U32 flag (IN)          Flag to call back
Return:
Note:
  Walks the full ccbChainList; every in-flight CCB is returned to the free
  list through the variant matching its type (TM/SMP/regular), and the
  corresponding CAM ccb is completed with CAM_SCSI_BUS_RESET when
  AGTIAPI_CALLBACK is requested.
******************************************************************************/
STATIC void agtiapi_FlushCCBs( struct agtiapi_softc *pCard, U32 flag )
{
  union ccb *ccb;
  ccb_t     *pccb;

  AGTIAPI_PRINTK( "agtiapi_FlushCCBs: enter \n" );
  for( pccb = (pccb_t)pCard->ccbChainList;
       pccb != NULL;
       pccb = pccb->pccbChainNext )
  {
    if( pccb->flags == 0 )
    {
      // printf( "agtiapi_FlushCCBs: nothing, continue \n" );
      continue;
    }
    /* Capture the CAM ccb before the Free* call clears pccb->ccb. */
    ccb = pccb->ccb;
    if ( pccb->flags & ( TASK_MANAGEMENT | DEV_RESET ) )
    {
      AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeTMCCB \n" );
      agtiapi_FreeTMCCB( pCard, pccb );
    }
    else
    {
      if ( pccb->flags & TAG_SMP )
      {
        AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeSMPCCB \n" );
        agtiapi_FreeSMPCCB( pCard, pccb );
      }
      else
      {
        AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeCCB \n" );
        agtiapi_FreeCCB( pCard, pccb );
      }
      if( ccb )
      {
        CMND_DMA_UNMAP( pCard, ccb );
        if( flag == AGTIAPI_CALLBACK )
        {
          ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
          xpt_done( ccb );
        }
      }
    }
  }
}

/*****************************************************************************
agtiapi_FreeSMPCCB()

Purpose:
  Free a ccb and put it back to ccbFreeList.
Parameters:
  struct agtiapi_softc *pmcsc (IN)  Pointer to HBA data structure
  pccb_t pccb (IN)                  A pointer to the driver's own CCB, not
                                    CAM's CCB
Returns:
Note:
  SMP variant of agtiapi_FreeCCB(): clears tiSMPFrame instead of the SCSI
  request block.
*****************************************************************************/
STATIC void agtiapi_FreeSMPCCB(struct agtiapi_softc *pmcsc, pccb_t pccb)
{
  union ccb *ccb = pccb->ccb;
  bus_dmasync_op_t op;

  AG_LOCAL_LOCK(&pmcsc->ccbLock);
  AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: start %p\n", pccb);

  /* set data transfer direction */
  if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
  {
    op = BUS_DMASYNC_POSTWRITE;
  }
  else
  {
    op = BUS_DMASYNC_POSTREAD;
  }

  if (pccb->numSgElements == 0)
  {
    // do nothing -- nothing was mapped
    AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 0\n");
  }
  else if (pccb->numSgElements == 1)
  {
    AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 1\n");
    //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD
    bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op);
    bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap);
  }
  else
  {
    AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 2 or higher \n");
    //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD
    bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op);
    bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap);
  }

  /*dma api cleanning*/
  pccb->dataLen = 0;
  pccb->retryCount = 0;
  pccb->ccbStatus = 0;
  pccb->startTime = 0;
  pccb->dmaHandle = 0;
  pccb->numSgElements = 0;
  pccb->tiIORequest.tdData = 0;
  memset((void *)&pccb->tiSMPFrame, 0, AGSMP_INIT_XCHG_LEN);
  pccb->flags = 0;
  pccb->ccb = NULL;
  /* Push onto the free list (LIFO) under ccbLock. */
  pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList;
  pmcsc->ccbFreeList = (caddr_t *)pccb;
  pmcsc->activeCCB--;
  AG_LOCAL_UNLOCK(&pmcsc->ccbLock);
  return;
}

/*****************************************************************************
agtiapi_FreeTMCCB()

Purpose:
  Free a ccb and put it back to
ccbFreeList. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeTMCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_PRINTK("agtiapi_FreeTMCCB: start %p\n", pccb); pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->scsiStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSuperScsiRequest, 0, AGSCSI_INIT_XCHG_LEN); pccb->flags = 0; pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /****************************************************************************** agtiapi_CheckAllVectors(): Purpose: Parameters: Return: Note: Currently, not used. 
******************************************************************************/
void agtiapi_CheckAllVectors( struct agtiapi_softc *pCard, bit32 context )
{
#ifdef SPC_MSIX_INTR
  if (!agtiapi_intx_mode)
  {
    int i;

    /* MSI-X: sweep each allocated vector. */
    for (i = 0; i < pCard->pCardInfo->maxInterruptVectors; i++)
      if (tiCOMInterruptHandler(&pCard->tiRoot, i) == agTRUE)
        tiCOMDelayedInterruptHandler(&pCard->tiRoot, i, 100, context);
  }
  else
  if (tiCOMInterruptHandler(&pCard->tiRoot, 0) == agTRUE)
    tiCOMDelayedInterruptHandler(&pCard->tiRoot, 0, 100, context);
#else
  /* Legacy INTx: only vector 0 exists. */
  if (tiCOMInterruptHandler(&pCard->tiRoot, 0) == agTRUE)
    tiCOMDelayedInterruptHandler(&pCard->tiRoot, 0, 100, context);
#endif
}

/******************************************************************************
agtiapi_CheckCB()

Purpose:
  Check call back function returned event for process completion
Parameters:
  struct agtiapi_softc *pCard  Pointer to card data structure
  U32 milisec (IN)             Waiting time for expected event
  U32 flag (IN)                Flag of the event to check
  U32 *pStatus (IN)            Pointer to status of the card or port to
                               check
Return:
  AGTIAPI_SUCCESS - event comes as expected
  AGTIAPI_FAIL    - event not coming
Note:
  Currently, not used
******************************************************************************/
agBOOLEAN agtiapi_CheckCB( struct agtiapi_softc *pCard,
                           U32 milisec,
                           U32 flag,
                           volatile U32 *pStatus )
{
  U32 msecsPerTick = pCard->pCardInfo->tiRscInfo.tiInitiatorResource.
                     initiatorOption.usecsPerTick / 1000;
  S32 i = milisec/msecsPerTick;
  AG_GLOBAL_ARG( _flags );

  AGTIAPI_PRINTK( "agtiapi_CheckCB: start\n" );
  AGTIAPI_FLOW( "agtiapi_CheckCB: start\n" );
  /* Always poll at least once, even for a sub-tick timeout. */
  if( i <= 0 )
    i = 1;
  while (i > 0)
  {
    /* TM requests publish completion in *pStatus; everything else in
       pCard->flags. */
    if (*pStatus & TASK_MANAGEMENT)
    {
      if (*pStatus & AGTIAPI_CB_DONE)
      {
        if( flag == 0 || *pStatus & flag )
          return AGTIAPI_SUCCESS;
        else
          return AGTIAPI_FAIL;
      }
    }
    else if (pCard->flags & AGTIAPI_CB_DONE)
    {
      if( flag == 0 || *pStatus & flag )
        return AGTIAPI_SUCCESS;
      else
        return AGTIAPI_FAIL;
    }
    agtiapi_DelayMSec( msecsPerTick );

    /* Drive the lower layer's timer and pending interrupts while waiting. */
    AG_SPIN_LOCK_IRQ( agtiapi_host_lock, _flags );
    tiCOMTimerTick( &pCard->tiRoot );
    agtiapi_CheckAllVectors( pCard, tiNonInterruptContext );
    AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, _flags );

    i--;
  }

  if( *pStatus & TASK_MANAGEMENT )
    *pStatus |= TASK_TIMEOUT;

  return AGTIAPI_FAIL;
}

/******************************************************************************
agtiapi_DiscoverTgt()

Purpose:
  Discover available devices
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to the HBA data structure
Return:
Note:
  Three passes over the portals: start discovery, wait for completion,
  then collect device handles regardless of completion state.
******************************************************************************/
STATIC void agtiapi_DiscoverTgt(struct agtiapi_softc *pCard)
{
  ag_portal_data_t *pPortalData;
  U32 count;

  AGTIAPI_PRINTK("agtiapi_DiscoverTgt: start\n");
  AGTIAPI_FLOW("agtiapi_DiscoverTgt\n");
  AGTIAPI_INIT("agtiapi_DiscoverTgt\n");

  pPortalData = pCard->pPortalData;
  for (count = 0; count < pCard->portCount; count++, pPortalData++)
  {
    pCard->flags &= ~AGTIAPI_CB_DONE;
    if (!(PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY))
    {
      if (pCard->flags & AGTIAPI_INIT_TIME)
      {
        if (agtiapi_CheckCB(pCard, 5000, AGTIAPI_PORT_DISC_READY,
            &PORTAL_STATUS(pPortalData)) == AGTIAPI_FAIL)
        {
          AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Port %p / %d not ready for "
                          "discovery\n",
                          pPortalData, count );
          /*
           * There is no need to spend time on discovering device
           * if port is not ready to do so.
           */
          continue;
        }
      }
      else
        continue;
    }
    AGTIAPI_FLOW( "agtiapi_DiscoverTgt: Portal %p DiscoverTargets starts\n",
                  pPortalData );
    AGTIAPI_INIT_DELAY(1000);
    pCard->flags &= ~AGTIAPI_CB_DONE;
    if (tiINIDiscoverTargets(&pCard->tiRoot,
                             &pPortalData->portalInfo.tiPortalContext,
                             FORCE_PERSISTENT_ASSIGN_MASK)
        != tiSuccess)
      AGTIAPI_PRINTK("agtiapi_DiscoverTgt: tiINIDiscoverTargets ERROR\n");

    /*
     * Should wait till discovery completion to start
     * next portal. However, lower layer have issue on
     * multi-portal case under Linux.
     */
  }

  pPortalData = pCard->pPortalData;
  for (count = 0; count < pCard->portCount; count++, pPortalData++)
  {
    if ((PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY))
    {
      if (agtiapi_CheckCB(pCard, 20000, AGTIAPI_DISC_COMPLETE,
          &PORTAL_STATUS(pPortalData)) == AGTIAPI_FAIL)
      {
        if ((PORTAL_STATUS(pPortalData) & AGTIAPI_DISC_COMPLETE))
          AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %p discover complete, "
                          "status 0x%x\n",
                          pPortalData,
                          PORTAL_STATUS(pPortalData) );
        else
          AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %p discover is not "
                          "completed, status 0x%x\n",
                          pPortalData, PORTAL_STATUS(pPortalData) );
        continue;
      }
      AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %d discover target "
                      "success\n",
                      count );
    }
  }

  /*
   * Calling to get device handle should be done per portal based
   * and better right after discovery is done. However, lower iscsi
   * layer may not returns discovery complete in correct sequence or we
   * ran out time. We get device handle for all portals together
   * after discovery is done or timed out.
   */
  pPortalData = pCard->pPortalData;
  for (count = 0; count < pCard->portCount; count++, pPortalData++)
  {
    /*
     * We try to get device handle no matter
     * if discovery is completed or not.
     */
    if (PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY)
    {
      U32 i;

      for (i = 0; i < AGTIAPI_GET_DEV_MAX; i++)
      {
        if (agtiapi_GetDevHandle(pCard, &pPortalData->portalInfo, 0, 0) != 0)
          break;
        agtiapi_DelayMSec(AGTIAPI_EXTRA_DELAY);
      }

      if ((PORTAL_STATUS(pPortalData) & AGTIAPI_DISC_COMPLETE) ||
          (pCard->tgtCount > 0))
        PORTAL_STATUS(pPortalData) |= ( AGTIAPI_DISC_DONE |
                                        AGTIAPI_PORT_LINK_UP );
    }
  }

  return;
}

/******************************************************************************
agtiapi_PrepCCBs()

Purpose:
  Prepares CCB including DMA map.
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to the HBA data structure
  ccb_hdr_t *hdr (IN)               Pointer to the CCB header
  U32 size (IN)                     size
  U32 max_ccb (IN)                  count
Return:
Note:
  Carves 'max_ccb' cache-line-aligned CCBs out of the allocation behind
  'hdr', links each onto both ccbChainList and ccbFreeList, creates its
  DMA map, and points it at its slice of the preallocated tiSgl_t pool.
******************************************************************************/
STATIC void agtiapi_PrepCCBs( struct agtiapi_softc *pCard,
                              ccb_hdr_t *hdr,
                              U32 size,
                              U32 max_ccb,
                              int tid )
{
  int i;
  U32 hdr_sz, ccb_sz;
  ccb_t *pccb = NULL;
  int offset = 0;
  int nsegs = 0;
  int sgl_sz = 0;

  AGTIAPI_PRINTK("agtiapi_PrepCCBs: start\n");
  /* Each target owns a fixed window of the shared tiSgl_t pool. */
  offset = tid * AGTIAPI_CCB_PER_DEVICE;
  nsegs = AGTIAPI_NSEGS;
  sgl_sz = sizeof(tiSgl_t) * nsegs;
  AGTIAPI_PRINTK( "agtiapi_PrepCCBs: tid %d offset %d nsegs %d sizeof(tiSgl_t) "
                  "%lu, max_ccb %d\n",
                  tid, offset, nsegs, sizeof(tiSgl_t), max_ccb );

  ccb_sz = roundup2(AGTIAPI_CCB_SIZE, cache_line_size());
  hdr_sz = roundup2(sizeof(*hdr), cache_line_size());

  AGTIAPI_PRINTK("agtiapi_PrepCCBs: after cache line\n");

  memset((void *)hdr, 0, size);
  /* Keep the raw allocation on ccbAllocList so it can be freed later. */
  hdr->next = pCard->ccbAllocList;
  pCard->ccbAllocList = hdr;

  AGTIAPI_PRINTK("agtiapi_PrepCCBs: after memset\n");

  pccb = (ccb_t*) ((char*)hdr + hdr_sz);

  for (i = 0; i < max_ccb; i++, pccb = (ccb_t*)((char*)pccb + ccb_sz))
  {
    pccb->tiIORequest.osData = (void *)pccb;

    /*
     * Initially put all the ccbs on the free list
     * in addition to chainlist.
     * ccbChainList is a list of all available ccbs
     * (free/active everything)
     */
    pccb->pccbChainNext = (pccb_t)pCard->ccbChainList;
    pccb->pccbNext      = (pccb_t)pCard->ccbFreeList;

    pCard->ccbChainList = (caddr_t *)pccb;
    pCard->ccbFreeList  = (caddr_t *)pccb;
    pCard->ccbTotal++;

#ifdef AGTIAPI_ALIGN_CHECK
    /* Debug-only: report any hot field that is not 64-byte aligned. */
    if (&pccb & 0x63)
      AGTIAPI_PRINTK("pccb = %p\n", pccb);
    if (pccb->devHandle & 0x63)
      AGTIAPI_PRINTK("devHandle addr = %p\n", &pccb->devHandle);
    if (&pccb->lun & 0x63)
      AGTIAPI_PRINTK("lun addr = %p\n", &pccb->lun);
    if (&pccb->targetId & 0x63)
      AGTIAPI_PRINTK("tig addr = %p\n", &pccb->targetId);
    if (&pccb->ccbStatus & 0x63)
      AGTIAPI_PRINTK("ccbStatus addr = %p\n", &pccb->ccbStatus);
    if (&pccb->scsiStatus & 0x63)
      AGTIAPI_PRINTK("scsiStatus addr = %p\n", &pccb->scsiStatus);
    if (&pccb->dataLen & 0x63)
      AGTIAPI_PRINTK("dataLen addr = %p\n", &pccb->dataLen);
    if (&pccb->senseLen & 0x63)
      AGTIAPI_PRINTK("senseLen addr = %p\n", &pccb->senseLen);
    if (&pccb->numSgElements & 0x63)
      AGTIAPI_PRINTK("numSgElements addr = %p\n", &pccb->numSgElements);
    if (&pccb->retryCount & 0x63)
      AGTIAPI_PRINTK("retry cnt addr = %p\n", &pccb->retryCount);
    if (&pccb->flags & 0x63)
      AGTIAPI_PRINTK("flag addr = %p\n", &pccb->flags);
    if (&pccb->pSenseData & 0x63)
      AGTIAPI_PRINTK("senseData addr = %p\n", &pccb->pSenseData);
    if (&pccb->sgList[0] & 0x63)
      AGTIAPI_PRINTK("SgList 0 = %p\n", &pccb->sgList[0]);
    if (&pccb->pccbNext & 0x63)
      AGTIAPI_PRINTK("ccb next = %p\n", &pccb->pccbNext);
    if (&pccb->pccbChainNext & 0x63)
      AGTIAPI_PRINTK("ccbChainNext = %p\n", &pccb->pccbChainNext);
    if (&pccb->cmd & 0x63)
      AGTIAPI_PRINTK("command = %p\n", &pccb->cmd);
    if( &pccb->startTime & 0x63 )
      AGTIAPI_PRINTK( "startTime = %p\n", &pccb->startTime );
    if (&pccb->tiIORequest & 0x63)
      AGTIAPI_PRINTK("tiIOReq addr = %p\n", &pccb->tiIORequest);
    if (&pccb->tdIOReqBody & 0x63)
      AGTIAPI_PRINTK("tdIORequestBody addr = %p\n", &pccb->tdIOReqBody);
    if (&pccb->tiSuperScsiRequest & 0x63)
      AGTIAPI_PRINTK( "InitiatorExchange addr = %p\n",
                      &pccb->tiSuperScsiRequest );
#endif
    if ( bus_dmamap_create( pCard->buffer_dmat, 0, &pccb->CCB_dmamap ) !=
         tiSuccess)
    {
      AGTIAPI_PRINTK("agtiapi_PrepCCBs: can't create dma\n");
      return;
    }
    /* assigns tiSgl_t memory to pccb */
    pccb->sgList = (void*)((U64)pCard->tisgl_mem + ((i + offset) * sgl_sz));
    pccb->tisgl_busaddr = pCard->tisgl_busaddr + ((i + offset) * sgl_sz);
    pccb->ccb = NULL;
    pccb->pccbIO = NULL;
    pccb->startTime = 0;
  }

#ifdef AGTIAPI_ALIGN_CHECK
  AGTIAPI_PRINTK("ccb size = %d / %d\n", sizeof(ccb_t), ccb_sz);
#endif
  return;
}

/******************************************************************************
agtiapi_InitCCBs()

Purpose:
  Create and initialize per card based CCB pool.
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to the HBA data structure
  int tgtCount (IN)                 Count
Return:
  Total number of ccb allocated
Note:
  Allocation failure is treated as fatal (panic); the function otherwise
  always reports 1.
******************************************************************************/
STATIC U32 agtiapi_InitCCBs(struct agtiapi_softc *pCard, int tgtCount, int tid)
{
  U32 max_ccb, size, ccb_sz, hdr_sz;
  int no_allocs = 0, i;
  ccb_hdr_t  *hdr = NULL;

  AGTIAPI_PRINTK("agtiapi_InitCCBs: start\n");
  AGTIAPI_PRINTK("agtiapi_InitCCBs: tgtCount %d tid %d\n", tgtCount, tid);
  AGTIAPI_FLOW("agtiapi_InitCCBs: tgtCount %d tid %d\n", tgtCount, tid);

#ifndef HOTPLUG_SUPPORT
  if (pCard->tgtCount > AGSA_MAX_INBOUND_Q)
    return 1;
#else
  if (tgtCount > AGSA_MAX_INBOUND_Q)
    tgtCount = AGSA_MAX_INBOUND_Q;
#endif

  max_ccb = tgtCount * AGTIAPI_CCB_PER_DEVICE;// / 4; // TBR
  ccb_sz = roundup2(AGTIAPI_CCB_SIZE, cache_line_size());
  hdr_sz = roundup2(sizeof(*hdr), cache_line_size());
  size = ccb_sz * max_ccb + hdr_sz;

  /* no_allocs is 0, so exactly one allocation is made here. */
  for (i = 0; i < (1 << no_allocs); i++)
  {
    hdr = (ccb_hdr_t*)malloc( size, M_PMC_MCCB, M_NOWAIT );
    if( !hdr )
    {
      panic( "agtiapi_InitCCBs: bug!!!\n" );
    }
    else
    {
      agtiapi_PrepCCBs( pCard, hdr, size, max_ccb, tid );
    }
  }

  return 1;
}

#ifdef LINUX_PERBI_SUPPORT
/******************************************************************************
agtiapi_GetWWNMappings()

Purpose:
  Get the
mappings from target IDs to WWNs, if any.
  Store them in the WWN_list array, indexed by target ID.
  Leave the devListIndex field blank; this will be filled-in later.
Parameters:
  ag_card_t *pCard (IN)        Pointer to HBA data structure
  ag_mapping_t *pMapList (IN)  Pointer to mapped device list
Return:
Note:
  The boot command line parameters are used to load the
  mapping information, which is contained in the system
  configuration file.
******************************************************************************/
STATIC void agtiapi_GetWWNMappings( struct agtiapi_softc *pCard,
                                    ag_mapping_t         *pMapList )
{
  int           devDisc;
  int           lIdx = 0;
  ag_tgt_map_t *pWWNList;
  ag_slr_map_t *pSLRList;
  ag_device_t  *pDevList;

  if( !pCard )
    panic( "agtiapi_GetWWNMappings: no pCard \n" );

  AGTIAPI_PRINTK( "agtiapi_GetWWNMappings: start\n" );

  pWWNList = pCard->pWWNList;
  pSLRList = pCard->pSLRList;
  pDevList = pCard->pDevList;
  pCard->numTgtHardMapped = 0;
  devDisc = pCard->devDiscover;

  /* Sentinel values in the last slot mark the end of each table.
     (-2 here vs. -1 for "unused" below -- distinguishes terminator
     from free entries.) */
  pWWNList[devDisc-1].devListIndex  = maxTargets;
  pSLRList[devDisc-1].localeNameLen = -2;
  pSLRList[devDisc-1].remoteNameLen = -2;
  pDevList[devDisc-1].targetId      = maxTargets;

  /*
   * Get the mappings from holding area which contains
   * the input of the system file and store them
   * in the WWN_list array, indexed by target ID.
   */
  for ( lIdx = 0; lIdx < devDisc - 1; lIdx++) {
    pWWNList[lIdx].flags = 0;
    pWWNList[lIdx].devListIndex  = maxTargets;
    pSLRList[lIdx].localeNameLen = -1;
    pSLRList[lIdx].remoteNameLen = -1;
  }

  // this is where we would propagate values fed to pMapList

} /* agtiapi_GetWWNMappings */
#endif

/******************************************************************************
agtiapi_FindWWNListNext()

Purpose:
  finds first available new (unused) wwn list entry
Parameters:
  ag_tgt_map_t *pWWNList  Pointer to head of wwn list
  int lstMax              Number of entries in WWNList
Return:
  index into WWNList indicating available entry space;
  if available entry space is not found, return negative value
******************************************************************************/
STATIC int agtiapi_FindWWNListNext( ag_tgt_map_t *pWWNList, int lstMax )
{
  int  lLstIdx;

  for ( lLstIdx = 0; lLstIdx < lstMax; lLstIdx++ )
  {
    /* An entry is free when it still has the reset-time devListIndex and
       no WWN has been stored (targetLen == 0). */
    if ( pWWNList[lLstIdx].devListIndex == lstMax &&
         pWWNList[lLstIdx].targetLen == 0 )
    {
      AGTIAPI_PRINTK( "agtiapi_FindWWNListNext: %d %d %d %d v. %d\n",
                      lLstIdx,
                      pWWNList[lLstIdx].devListIndex,
                      pWWNList[lLstIdx].targetLen,
                      pWWNList[lLstIdx].portId,
                      lstMax );
      return lLstIdx;
    }
  }
  return -1;
}

/******************************************************************************
agtiapi_GetDevHandle()

Purpose:
  Get device handle.  Handles will be placed in the devlist array with
  same order as TargetList provided and will be mapped to a scsi target
  id and registered to OS later.
Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure ag_portal_info_t *pPortalInfo (IN) Pointer to the portal data structure U32 eType (IN) Port event U32 eStatus (IN) Port event status Return: Number of device handle slot present Note: The sequence of device handle will match the sequence of taregt list ******************************************************************************/ STATIC U32 agtiapi_GetDevHandle( struct agtiapi_softc *pCard, ag_portal_info_t *pPortalInfo, U32 eType, U32 eStatus ) { ag_device_t *pDevice; // tiDeviceHandle_t *agDev[pCard->devDiscover]; tiDeviceHandle_t **agDev; int devIdx, szdv, devTotal, cmpsetRtn; int lDevIndex = 0, lRunScanFlag = FALSE; int *lDevFlags; tiPortInfo_t portInfT; ag_device_t lTmpDevice; ag_tgt_map_t *pWWNList; ag_slr_map_t *pSLRList; bit32 lReadRm; bit16 lReadCt; AGTIAPI_PRINTK( "agtiapi_GetDevHandle: start\n" ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: pCard->devDiscover %d / tgtCt %d\n", pCard->devDiscover, pCard->tgtCount ); AGTIAPI_FLOW( "agtiapi_GetDevHandle: portalInfo %p\n", pPortalInfo ); AGTIAPI_INIT_DELAY( 1000 ); agDev = (tiDeviceHandle_t **) malloc( sizeof(tiDeviceHandle_t *) * pCard->devDiscover, M_PMC_MDEV, M_ZERO | M_NOWAIT); if (agDev == NULL) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: failed to alloc agDev[]\n" ); return 0; } lDevFlags = (int *) malloc( sizeof(int) * pCard->devDiscover, M_PMC_MFLG, M_ZERO | M_NOWAIT ); if (lDevFlags == NULL) { free((caddr_t)agDev, M_PMC_MDEV); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: failed to alloc lDevFlags[]\n" ); return 0; } pWWNList = pCard->pWWNList; pSLRList = pCard->pSLRList; memset( (void *)agDev, 0, sizeof(void *) * pCard->devDiscover ); memset( lDevFlags, 0, sizeof(int) * pCard->devDiscover ); // get device handles devTotal = tiINIGetDeviceHandles( &pCard->tiRoot, &pPortalInfo->tiPortalContext, (tiDeviceHandle_t **)agDev, pCard->devDiscover ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: portalInfo %p port id %d event %u " "status %u card %p 
pCard->devDiscover %d devTotal %d " "pPortalInfo->devTotal %d pPortalInfo->devPrev %d " "AGTIAPI_INIT_TIME %x\n", pPortalInfo, pPortalInfo->portID, eType, eStatus, pCard, pCard->devDiscover, devTotal, pPortalInfo->devTotal, pPortalInfo->devPrev, pCard->flags & AGTIAPI_INIT_TIME ); // reset devTotal from any previous runs of this pPortalInfo->devPrev = devTotal; pPortalInfo->devTotal = devTotal; AG_LIST_LOCK( &pCard->devListLock ); if ( tiCOMGetPortInfo( &pCard->tiRoot, &pPortalInfo->tiPortalContext, &portInfT ) != tiSuccess) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: tiCOMGetPortInfo did not succeed. \n" ); } szdv = sizeof( pPortalInfo->pDevList ) / sizeof( pPortalInfo->pDevList[0] ); if (szdv > pCard->devDiscover) { szdv = pCard->devDiscover; } // reconstructing dev list via comparison of wwn for ( devIdx = 0; devIdx < pCard->devDiscover; devIdx++ ) { if ( agDev[devIdx] != NULL ) { // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: agDev %d not NULL %p\n", // devIdx, agDev[devIdx] ); // pack temp device structure for tiINIGetDeviceInfo call pDevice = &lTmpDevice; pDevice->devType = DIRECT_DEVICE; pDevice->pCard = (void *)pCard; pDevice->flags = ACTIVE; pDevice->pPortalInfo = pPortalInfo; pDevice->pDevHandle = agDev[devIdx]; pDevice->qbusy = agFALSE; //AGTIAPI_PRINTK( "agtiapi_GetDevHandle: idx %d / %d : %p \n", // devIdx, pCard->devDiscover, agDev[devIdx] ); tiINIGetDeviceInfo( &pCard->tiRoot, agDev[devIdx], &pDevice->devInfo ); //AGTIAPI_PRINTK( "agtiapi_GetDevHandle: wwn sizes %ld %d/%d ", // sizeof(pDevice->targetName), // pDevice->devInfo.osAddress1, // pDevice->devInfo.osAddress2 ); wwncpy( pDevice ); wwnprintk( (unsigned char*)pDevice->targetName, pDevice->targetLen ); for ( lDevIndex = 0; lDevIndex < szdv; lDevIndex++ ) // match w/ wwn list { if ( (pCard->pDevList[lDevIndex].portalId == pPortalInfo->portID) && pDevice->targetLen > 0 && portInfT.localNameLen > 0 && portInfT.remoteNameLen > 0 && pSLRList[pWWNList[lDevIndex].sasLrIdx].localeNameLen > 0 && 
pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteNameLen > 0 && ( portInfT.localNameLen == pSLRList[pWWNList[lDevIndex].sasLrIdx].localeNameLen ) && ( portInfT.remoteNameLen == pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteNameLen ) && memcmp( pWWNList[lDevIndex].targetName, pDevice->targetName, pDevice->targetLen ) == 0 && memcmp( pSLRList[pWWNList[lDevIndex].sasLrIdx].localeName, portInfT.localName, portInfT.localNameLen ) == 0 && memcmp( pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteName, portInfT.remoteName, portInfT.remoteNameLen ) == 0 ) { AGTIAPI_PRINTK( " pWWNList match @ %d/%d/%d \n", lDevIndex, devIdx, pPortalInfo->portID ); if ( (pCard->pDevList[lDevIndex].targetId == lDevIndex) && ( pPortalInfo->pDevList[lDevIndex] == &pCard->pDevList[lDevIndex] ) ) // active { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: dev in use %d of %d/%d\n", lDevIndex, devTotal, pPortalInfo->portID ); lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev handle lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used lReadRm = atomic_readandclear_32( &pWWNList[lDevIndex].devRemoved ); if ( lReadRm ) // cleared timeout, now remove count for timer { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: clear timer count for" " %d of %d\n", lDevIndex, pPortalInfo->portID ); atomic_subtract_16( &pCard->rmChkCt, 1 ); lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { callout_stop( &pCard->devRmTimer ); } } break; } AGTIAPI_PRINTK( "agtiapi_GetDevHandle: goin fresh on %d of %d/%d\n", lDevIndex, // reactivate now devTotal, pPortalInfo->portID ); // pDevice going fresh lRunScanFlag = TRUE; // scan and clear outstanding removals // pCard->tgtCount++; ## pDevice->targetId = lDevIndex; pDevice->portalId = pPortalInfo->portID; memcpy ( &pCard->pDevList[lDevIndex], pDevice, sizeof(lTmpDevice) ); agDev[devIdx]->osData = (void *)&pCard->pDevList[lDevIndex]; if ( agtiapi_InitCCBs( pCard, 1, pDevice->targetId ) == 0 ) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: InitCCB " "tgtCnt %d ERROR!\n", 
pCard->tgtCount ); AG_LIST_UNLOCK( &pCard->devListLock ); free((caddr_t)lDevFlags, M_PMC_MFLG); free((caddr_t)agDev, M_PMC_MDEV); return 0; } pPortalInfo->pDevList[lDevIndex] = &pCard->pDevList[lDevIndex]; // (ag_device_t *) if ( 0 == lDevFlags[devIdx] ) { pPortalInfo->devTotal++; lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev used lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used } else { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: odd dev handle " "status inspect %d %d %d\n", lDevFlags[devIdx], devIdx, lDevIndex ); pPortalInfo->devTotal++; lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev used lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used } break; } } // end: match this wwn with previous wwn list // we have an agDev entry, but no pWWNList target for it if ( !(lDevFlags[devIdx] & DPMC_LEANFLAG_AGDEVUSED) ) { // flag dev handle not accounted for yet lDevFlags[devIdx] |= DPMC_LEANFLAG_NOWWNLIST; // later, get an empty pDevice and map this agDev. // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: devIdx %d flags 0x%x, %d\n", // devIdx, lDevFlags[devIdx], (lDevFlags[devIdx] & 8) ); } } else { lDevFlags[devIdx] |= DPMC_LEANFLAG_NOAGDEVYT; // known empty agDev handle } } // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: all WWN all the time, " // "devLstIdx/flags/(WWNL)portId ... 
\n" ); // review device list for further action needed for ( devIdx = 0; devIdx < pCard->devDiscover; devIdx++ ) { if ( lDevFlags[devIdx] & DPMC_LEANFLAG_NOWWNLIST ) // new target, register { int lNextDyad; // find next available dyad entry AGTIAPI_PRINTK( "agtiapi_GetDevHandle: register new target, " "devIdx %d -- %d \n", devIdx, pCard->devDiscover ); lRunScanFlag = TRUE; // scan and clear outstanding removals for ( lNextDyad = 0; lNextDyad < pCard->devDiscover; lNextDyad++ ) { if ( pSLRList[lNextDyad].localeNameLen < 0 && pSLRList[lNextDyad].remoteNameLen < 0 ) break; } if ( lNextDyad == pCard->devDiscover ) { printf( "agtiapi_GetDevHandle: failed to find available SAS LR\n" ); AG_LIST_UNLOCK( &pCard->devListLock ); free( (caddr_t)lDevFlags, M_PMC_MFLG ); free( (caddr_t)agDev, M_PMC_MDEV ); return 0; } // index of new entry lDevIndex = agtiapi_FindWWNListNext( pWWNList, pCard->devDiscover ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: listIdx new target %d of %d/%d\n", lDevIndex, devTotal, pPortalInfo->portID ); if ( 0 > lDevIndex ) { printf( "agtiapi_GetDevHandle: WARNING -- WWNList exhausted.\n" ); continue; } pDevice = &pCard->pDevList[lDevIndex]; tiINIGetDeviceInfo( &pCard->tiRoot, agDev[devIdx], &pDevice->devInfo ); wwncpy( pDevice ); agtiapi_InitCCBs( pCard, 1, lDevIndex ); pDevice->pCard = (void *)pCard; pDevice->devType = DIRECT_DEVICE; // begin to populate new WWNList entry memcpy( pWWNList[lDevIndex].targetName, pDevice->targetName, pDevice->targetLen ); pWWNList[lDevIndex].targetLen = pDevice->targetLen; pWWNList[lDevIndex].flags = SOFT_MAPPED; pWWNList[lDevIndex].portId = pPortalInfo->portID; pWWNList[lDevIndex].devListIndex = lDevIndex; pWWNList[lDevIndex].sasLrIdx = lNextDyad; pSLRList[lNextDyad].localeNameLen = portInfT.localNameLen; pSLRList[lNextDyad].remoteNameLen = portInfT.remoteNameLen; memcpy( pSLRList[lNextDyad].localeName, portInfT.localName, portInfT.localNameLen ); memcpy( pSLRList[lNextDyad].remoteName, portInfT.remoteName, 
portInfT.remoteNameLen ); // end of populating new WWNList entry pDevice->targetId = lDevIndex; pDevice->flags = ACTIVE; pDevice->CCBCount = 0; pDevice->pDevHandle = agDev[devIdx]; agDev[devIdx]->osData = (void*)pDevice; pDevice->pPortalInfo = pPortalInfo; pDevice->portalId = pPortalInfo->portID; pPortalInfo->pDevList[lDevIndex] = (void*)pDevice; lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // mark pDevice slot used } if ( (pCard->pDevList[devIdx].portalId == pPortalInfo->portID) && !(lDevFlags[devIdx] & DPMC_LEANFLAG_PDEVSUSED) ) // pDevice not used { pDevice = &pCard->pDevList[devIdx]; //pDevice->flags &= ~ACTIVE; if ( ( pDevice->pDevHandle != NULL || pPortalInfo->pDevList[devIdx] != NULL ) ) { atomic_add_16( &pCard->rmChkCt, 1 ); // show count of lost device if (FALSE == lRunScanFlag) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: targ dropped out %d of %d/%d\n", devIdx, devTotal, pPortalInfo->portID ); // if ( 0 == pWWNList[devIdx].devRemoved ) '.devRemoved = 5; cmpsetRtn = atomic_cmpset_32( &pWWNList[devIdx].devRemoved, 0, 5 ); if ( 0 == cmpsetRtn ) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: target %d timer already set\n", devIdx ); } else { callout_reset( &pCard->devRmTimer, 1 * hz, agtiapi_devRmCheck, pCard ); } } // else ... scan coming soon enough anyway, ignore timer for dropout } } } // end of for ( devIdx = 0; ... 
/* Tail of agtiapi_GetDevHandle() (the function begins in an earlier chunk):
 * drop the device-list lock, free the per-call scratch arrays, and — if a new
 * target was registered during this pass — clear pending removals and rescan.
 */
AG_LIST_UNLOCK( &pCard->devListLock );
free((caddr_t)lDevFlags, M_PMC_MFLG);
free((caddr_t)agDev, M_PMC_MDEV);
if ( TRUE == lRunScanFlag ) // set above when a new target was registered
  agtiapi_clrRmScan( pCard );
return devTotal;
} // end agtiapi_GetDevHandle

/******************************************************************************
agtiapi_scan()
Purpose:
  Triggers CAM's bus rescan for this HBA (wildcard target/LUN).
Parameters:
  struct agtiapi_softc *pmcsc (IN)  Pointer to the HBA data structure
Return:
Note:
  Allocates a CCB and hands it to xpt_rescan(); on allocation or path-creation
  failure it simply returns without scanning.
******************************************************************************/
static void agtiapi_scan(struct agtiapi_softc *pmcsc)
{
  union ccb *ccb;
  int bus, tid, lun;   // NOTE(review): lun is assigned but a literal wildcard
                       // is passed to xpt_create_path() below

  AGTIAPI_PRINTK("agtiapi_scan: start cardNO %d \n", pmcsc->cardNo);

  bus = cam_sim_path(pmcsc->sim);
  tid = CAM_TARGET_WILDCARD;
  lun = CAM_LUN_WILDCARD;

  // CCB allocation and path creation are done under the I/O lock
  mtx_lock(&(pmcsc->pCardInfo->pmIOLock));
  ccb = xpt_alloc_ccb_nowait();
  if (ccb == agNULL)
  {
    mtx_unlock(&(pmcsc->pCardInfo->pmIOLock));
    return;
  }
  if (xpt_create_path(&ccb->ccb_h.path, agNULL, bus, tid,
                      CAM_LUN_WILDCARD) != CAM_REQ_CMP)
  {
    mtx_unlock(&(pmcsc->pCardInfo->pmIOLock));
    xpt_free_ccb(ccb);
    return;
  }
  mtx_unlock(&(pmcsc->pCardInfo->pmIOLock));
  pmcsc->dev_scan = agTRUE;   // mark that a scan has been kicked off
  xpt_rescan(ccb);            // CAM takes ownership of the ccb
  return;
}

/******************************************************************************
agtiapi_DeQueueCCB()
Purpose:
  Remove a ccb from a singly linked queue.
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to the card structure
  pccb_t *phead (IN)                Pointer to the head of the ccb queue
  pccb_t *ptail (IN)                Pointer to the tail of the ccb queue
  ccb_t *pccb (IN)                  Pointer to the ccb to be removed
Return:
  AGTIAPI_SUCCESS - the ccb was removed from the queue
  AGTIAPI_FAIL    - the ccb was not found in the queue
Note:
******************************************************************************/
STATIC agBOOLEAN
agtiapi_DeQueueCCB(struct agtiapi_softc *pCard, pccb_t *phead, pccb_t *ptail,
#ifdef AGTIAPI_LOCAL_LOCK
                   struct mtx *lock,
#endif
                   ccb_t *pccb)
{
  ccb_t *pccb_curr;
  U32 status = AGTIAPI_FAIL;

  AGTIAPI_PRINTK("agtiapi_DeQueueCCB: %p from %p\n", pccb, phead);

  // guard: nothing to do for a NULL ccb or an empty queue
  if (pccb == NULL || *phead == NULL)
  {
    return AGTIAPI_FAIL;
  }
  // NOTE(review): duplicate of the debug print issued just before the NULL
  // check above; harmless but prints the same message twice per call
  AGTIAPI_PRINTK("agtiapi_DeQueueCCB: %p from %p\n", pccb, phead);
  AG_LOCAL_LOCK(lock);
  if (pccb == *phead)
  {
    // removing the head: advance head first, then detach the ccb
    *phead = (*phead)->pccbNext;
    if (pccb == *ptail)   // was also the tail -> queue is now empty
    {
      *ptail = NULL;
    }
    else
      pccb->pccbNext = NULL;
    status = AGTIAPI_SUCCESS;
  }
  else
  {
    // walk the list looking for the node whose successor is pccb
    pccb_curr = *phead;
    while (pccb_curr->pccbNext != NULL)
    {
      if (pccb_curr->pccbNext == pccb)
      {
        pccb_curr->pccbNext = pccb->pccbNext;  // unlink
        pccb->pccbNext = NULL;
        if (pccb == *ptail)   // removed the tail -> predecessor is new tail
        {
          *ptail = pccb_curr;
        }
        else
          pccb->pccbNext = NULL;  // NOTE(review): redundant, already NULL above
        status = AGTIAPI_SUCCESS;
        break;
      }
      pccb_curr = pccb_curr->pccbNext;
    }
  }
  AG_LOCAL_UNLOCK(lock);
  return status;
}

/* Dump a WWN (or other byte blob) as hex to the debug log. */
STATIC void wwnprintk( unsigned char *name, int len )
{
  int i;

  for (i = 0; i < len; i++, name++)
    AGTIAPI_PRINTK("%02x", *name);
  AGTIAPI_PRINTK("\n");
}

/*
 * SAS and SATA behind expander has 8 byte long unique address.
 * However, direct connect SATA device use 512 byte unique device id.
 * SPC uses remoteName to indicate length of ID and remoteAddress for the
 * address of memory that holding ID.
 *
 * Copies the concatenated (remoteName + remoteAddress) identifier into
 * pDevice->targetName and sets pDevice->targetLen.
 * Returns the combined length on success, -1 if it would overflow targetName.
 */
STATIC int wwncpy( ag_device_t *pDevice )
{
  int rc = 0;

  // osAddress1/osAddress2 carry the two segment lengths — bounds-check the sum
  if (sizeof(pDevice->targetName) >= pDevice->devInfo.osAddress1 +
                                     pDevice->devInfo.osAddress2)
  {
    memcpy(pDevice->targetName, pDevice->devInfo.remoteName,
           pDevice->devInfo.osAddress1);
    memcpy(pDevice->targetName + pDevice->devInfo.osAddress1,
           pDevice->devInfo.remoteAddress, pDevice->devInfo.osAddress2);
    pDevice->targetLen = pDevice->devInfo.osAddress1 +
                         pDevice->devInfo.osAddress2;
    rc = pDevice->targetLen;
  }
  else
  {
    AGTIAPI_PRINTK("WWN wrong size: %d + %d ERROR\n",
                   pDevice->devInfo.osAddress1, pDevice->devInfo.osAddress2);
    rc = -1;
  }
  return rc;
}

/******************************************************************************
agtiapi_ReleaseCCBs()

Purpose:
  Free all allocated CCB memories for the Host Adapter.
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to HBA data structure
Return:
Note:
******************************************************************************/
STATIC void agtiapi_ReleaseCCBs( struct agtiapi_softc *pCard )
{
  ccb_hdr_t *hdr;
  U32 hdr_sz;
  ccb_t *pccb = NULL;

  AGTIAPI_PRINTK( "agtiapi_ReleaseCCBs: start\n" );

#if ( defined AGTIAPI_TEST_DPL || defined AGTIAPI_TEST_EPL )
  // NOTE(review): this redeclares pccb already declared above — a compile
  // error if either test macro is defined; confirm before enabling them
  ccb_t *pccb;
#endif

#ifdef AGTIAPI_TEST_DPL
  // return per-ccb DPL buffers to their pool
  for (pccb = (pccb_t)pCard->ccbChainList; pccb != NULL;
       pccb = pccb->pccbChainNext)
  {
    if(pccb->dplPtr && pccb->dplDma)
      pci_pool_free(pCard->dpl_ctx_pool, pccb->dplPtr, pccb->dplDma);
  }
#endif

#ifdef AGTIAPI_TEST_EPL
  // return per-ccb EPL buffers to their pool
  for (pccb = (pccb_t)pCard->ccbChainList; pccb != NULL;
       pccb = pccb->pccbChainNext)
  {
    if(pccb->epl_ptr && pccb->epl_dma_ptr)
      pci_pool_free( pCard->epl_ctx_pool, pccb->epl_ptr, pccb->epl_dma_ptr );
  }
#endif

  // walk the allocation list; each allocation is a header followed (after
  // cache-line alignment) by the ccb itself
  while ((hdr = pCard->ccbAllocList) != NULL)
  {
    pCard->ccbAllocList = hdr->next;
    hdr_sz = roundup2(sizeof(*hdr), cache_line_size());
    pccb = (ccb_t*) ((char*)hdr + hdr_sz);
    if (pCard->buffer_dmat != NULL && pccb->CCB_dmamap != NULL)
    {
      bus_dmamap_destroy(pCard->buffer_dmat, pccb->CCB_dmamap);
    }
    free(hdr, M_PMC_MCCB);
  }
  pCard->ccbAllocList = NULL;

  return;
}

/******************************************************************************
agtiapi_TITimer()
Purpose:
  Timer tick for tisa common layer
Parameters:
  void *data (IN)  Pointer to the HBA data structure
Return:
Note:
******************************************************************************/
STATIC void agtiapi_TITimer( void *data )
{
  U32 next_tick;
  struct agtiapi_softc *pCard;

  pCard = (struct agtiapi_softc *)data;

//  AGTIAPI_PRINTK("agtiapi_TITimer: start\n");
  AG_GLOBAL_ARG( flags );

  // convert the low-level layer's tick period to OS ticks
  next_tick = pCard->pCardInfo->tiRscInfo.tiLoLevelResource.
              loLevelOption.usecsPerTick / USEC_PER_TICK;
  if( next_tick == 0 )                       /* no timer required */
    return;
  AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags );
  if( pCard->flags & AGTIAPI_SHUT_DOWN )     // shutting down: don't re-arm
    goto ext;
  tiCOMTimerTick( &pCard->tiRoot );  /* tisa common layer timer tick */

  //add for polling mode
#ifdef PMC_SPC
  if( agtiapi_polling_mode )
    agtiapi_CheckAllVectors( pCard, tiNonInterruptContext );
#endif
  // re-arm ourselves for the next period
  callout_reset( &pCard->OS_timer, next_tick, agtiapi_TITimer, pCard );
ext:
  AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags );
  return;
}

/******************************************************************************
agtiapi_clrRmScan()
Purpose:
  Clears device list entries scheduled for timeout and calls scan
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to HBA data structure
******************************************************************************/
STATIC void agtiapi_clrRmScan( struct agtiapi_softc *pCard )
{
  ag_tgt_map_t     *pWWNList;
  ag_portal_info_t *pPortalInfo;
  ag_portal_data_t *pPortalData;
  int               lIdx;
  bit32             lReadRm;
  bit16             lReadCt;

  pWWNList = pCard->pWWNList;

  AGTIAPI_PRINTK( "agtiapi_clrRmScan: start\n" );

  AG_LIST_LOCK( &pCard->devListLock );

  for ( lIdx = 0; lIdx < pCard->devDiscover; lIdx++ )
  {
    // rmChkCt counts outstanding removals; stop early once it hits zero
    lReadCt = atomic_load_acq_16( &pCard->rmChkCt );
    if ( 0 == lReadCt )
    {
      break; // trim to who cares
    }

    // atomically consume this entry's removal marker
    lReadRm = atomic_readandclear_32( &pWWNList[lIdx].devRemoved );
    if ( lReadRm > 0 )
    {
      // detach the device from both the card list and its portal list
      pCard->pDevList[lIdx].flags &= ~ACTIVE;
      pCard->pDevList[lIdx].pDevHandle = NULL;

      pPortalData = &pCard->pPortalData[pWWNList[lIdx].portId];
      pPortalInfo = &pPortalData->portalInfo;
      pPortalInfo->pDevList[lIdx] = NULL;

      AGTIAPI_PRINTK( "agtiapi_clrRmScan: cleared dev %d at port %d\n",
                      lIdx, pWWNList[lIdx].portId );
      atomic_subtract_16( &pCard->rmChkCt, 1 );
    }
  }
  AG_LIST_UNLOCK( &pCard->devListLock );

  // re-scan so CAM notices the departures
  agtiapi_scan( pCard );
}

/******************************************************************************
agtiapi_devRmCheck()
Purpose:
  Timer tick to check for timeout on missing targets
  Removes device list
  entry when timeout is reached
Parameters:
  void *data (IN)  Pointer to the HBA data structure
******************************************************************************/
STATIC void agtiapi_devRmCheck( void *data )
{
  struct agtiapi_softc *pCard;
  ag_tgt_map_t         *pWWNList;
  int                   lIdx, cmpsetRtn, lRunScanFlag = FALSE;
  bit16                 lReadCt;
  bit32                 lReadRm;

  pCard = ( struct agtiapi_softc * )data;

  // routine overhead
  if ( callout_pending( &pCard->devRmTimer ) )  // callout was reset
  {
    return;
  }
  if ( !callout_active( &pCard->devRmTimer ) )  // callout was stopped
  {
    return;
  }
  callout_deactivate( &pCard->devRmTimer );

  if( pCard->flags & AGTIAPI_SHUT_DOWN )
  {
    return;  // implicit timer clear
  }

  pWWNList = pCard->pWWNList;
  AG_LIST_LOCK( &pCard->devListLock );
  lReadCt = atomic_load_acq_16( &pCard->rmChkCt );
  if ( lReadCt )   // there are still devices counting down toward removal
  {
    // re-arm the timer for the next countdown tick, unless it already is
    if ( callout_pending(&pCard->devRmTimer) == FALSE )
    {
      callout_reset( &pCard->devRmTimer, 1 * hz, agtiapi_devRmCheck, pCard );
    }
    else
    {
      AG_LIST_UNLOCK( &pCard->devListLock );
      return;
    }
    for ( lIdx = 0; lIdx < pCard->devDiscover; lIdx++ )
    {
      lReadCt = atomic_load_acq_16( &pCard->rmChkCt );
      if ( 0 == lReadCt )
      {
        break; // if handled somewhere else, get out
      }
      lReadRm = atomic_load_acq_32( &pWWNList[lIdx].devRemoved );
      if ( lReadRm > 0 )
      {
        if ( 1 == lReadRm ) // timed out
        {
          // no decrement of devRemoved as way to leave a clrRmScan marker
          lRunScanFlag = TRUE; // other devRemoved values are about to get wiped
          break; // ... so bail out
        }
        else
        {
          AGTIAPI_PRINTK( "agtiapi_devRmCheck: counting down dev %d @ %d; %d\n",
                          lIdx, lReadRm, lReadCt );
          // decrement the countdown; cmpset tolerates a racing update
          cmpsetRtn = atomic_cmpset_32( &pWWNList[lIdx].devRemoved,
                                        lReadRm, lReadRm-1 );
          if ( 0 == cmpsetRtn )
          {
            printf( "agtiapi_devRmCheck: %d decrement already handled\n",
                    lIdx );
          }
        }
      }
    }
    AG_LIST_UNLOCK( &pCard->devListLock );
    if ( TRUE == lRunScanFlag )
      agtiapi_clrRmScan( pCard );   // flush timed-out entries and rescan
  }
  else
  {
    AG_LIST_UNLOCK( &pCard->devListLock );
  }

  return;
}

/* CAM poll entry point — intentionally a no-op for this driver. */
static void agtiapi_cam_poll( struct cam_sim *asim )
{
  return;
}

/*****************************************************************************
agtiapi_ResetCard()

Purpose:
  Hard or soft reset on the controller and resend any
  outstanding requests if needed.
Parameters:
  struct agtiapi_softc *pCard (IN)  Pointer to HBA data structure
  unsigned long flags (IN/OUT)      Flags used in locking done from calling
                                    layers
Return:
  AGTIAPI_SUCCESS - reset successful
  AGTIAPI_FAIL    - reset failed
Note:
*****************************************************************************/
U32 agtiapi_ResetCard( struct agtiapi_softc *pCard, unsigned long *flags )
{
  ag_device_t      *pDevice;
  U32               lIdx = 0;
  U32               lFlagVal;
  agBOOLEAN         ret;
  ag_portal_info_t *pPortalInfo;
  ag_portal_data_t *pPortalData;
  U32               count, loop;
  int               szdv;

  // only one reset at a time
  if( pCard->flags & AGTIAPI_RESET )
  {
    AGTIAPI_PRINTK( "agtiapi_ResetCard: reset card already in progress!\n" );
    return AGTIAPI_FAIL;
  }
  AGTIAPI_PRINTK( "agtiapi_ResetCard: Enter cnt %d\n", pCard->resetCount );
#ifdef LOGEVENT
  agtiapi_LogEvent( pCard, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0,
                    "Reset initiator time = %d!", pCard->resetCount + 1 );
#endif

  pCard->flags |= AGTIAPI_RESET;
  pCard->flags &= ~(AGTIAPI_CB_DONE | AGTIAPI_RESET_SUCCESS);
  // quiesce interrupts and flush outstanding CCBs before resetting
  tiCOMSystemInterruptsActive( &pCard->tiRoot, FALSE );
  pCard->flags &= ~AGTIAPI_SYS_INTR_ON;
  agtiapi_FlushCCBs( pCard, AGTIAPI_CALLBACK );

  for ( lIdx = 1; 3 >= lIdx; lIdx++ ) // we try reset up to 3 times
  {
    if( pCard->flags & AGTIAPI_SOFT_RESET )
    {
      AGTIAPI_PRINTK( "agtiapi_ResetCard: soft variant\n" );
      tiCOMReset( &pCard->tiRoot, tiSoftReset );
    }
    else
    {
      AGTIAPI_PRINTK( "agtiapi_ResetCard: no flag, no reset!\n" );
    }
    lFlagVal = AGTIAPI_RESET_SUCCESS;
    // drop the spin lock while waiting for the reset-complete callback
    AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, *flags );
    ret = agtiapi_CheckCB( pCard, 50000, lFlagVal, &pCard->flags );
    AG_SPIN_LOCK_IRQ( agtiapi_host_lock, *flags );
    if( ret == AGTIAPI_FAIL )
    {
      AGTIAPI_PRINTK( "agtiapi_ResetCard: CheckCB indicates failed reset call, "
                      "try again?\n" );
    }
    else
    {
      break;   // reset confirmed, stop retrying
    }
  }
  if ( 1 < lIdx )   // more than one attempt was needed — log the outcome
  {
    if ( AGTIAPI_FAIL == ret )
    {
      AGTIAPI_PRINTK( "agtiapi_ResetCard: soft reset failed after try %d\n",
                      lIdx );
    }
    else
    {
      AGTIAPI_PRINTK( "agtiapi_ResetCard: soft reset success at try %d\n",
                      lIdx );
    }
  }
  if( AGTIAPI_FAIL == ret )
  {
    printf( "agtiapi_ResetCard: reset ERROR\n" );
    pCard->flags &= ~AGTIAPI_INSTALLED;
    return AGTIAPI_FAIL;
  }

  pCard->flags &= ~AGTIAPI_SOFT_RESET;

  // disable all devices
  pDevice = pCard->pDevList;
  for( lIdx = 0; lIdx < maxTargets; lIdx++, pDevice++ )
  {
    /* if ( pDevice->flags & ACTIVE )
       {
         printf( "agtiapi_ResetCard: before ... active device %d\n", lIdx );
       } */
    pDevice->flags &= ~ACTIVE;
  }

  AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, *flags );
  if( tiCOMPortInit( &pCard->tiRoot, agFALSE ) != tiSuccess )
    printf( "agtiapi_ResetCard: tiCOMPortInit FAILED \n" );
  else
    AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortInit success\n" );

  if( !pCard->pDevList )
  {  // try to get a little sanity here
    AGTIAPI_PRINTK( "agtiapi_ResetCard: no pDevList ERROR %p\n",
                    pCard->pDevList );
    return AGTIAPI_FAIL;
  }

  AGTIAPI_PRINTK( "agtiapi_ResetCard: pre target-count %d port-count %d\n",
                  pCard->tgtCount, pCard->portCount );
  pCard->tgtCount = 0;

  DELAY( 500000 );   // NOTE(review): 500 ms settle delay — presumably gives
                     // the firmware time after port init; confirm

  pCard->flags &= ~AGTIAPI_CB_DONE;
  pPortalData = pCard->pPortalData;

  // reinitialize every portal: clear its status and device list, then
  // restart the port; the device list is rebuilt later in GetDevHandle
  for( count = 0; count < pCard->portCount; count++ )
  {
    AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags );
    pPortalInfo = &pPortalData->portalInfo;
    pPortalInfo->portStatus = 0;
    pPortalInfo->portStatus &= ~( AGTIAPI_PORT_START      |
                                  AGTIAPI_PORT_DISC_READY |
                                  AGTIAPI_DISC_DONE       |
                                  AGTIAPI_DISC_COMPLETE );

    szdv =
      sizeof( pPortalInfo->pDevList ) / sizeof( pPortalInfo->pDevList[0] );
    if (szdv > pCard->devDiscover)
    {
      szdv = pCard->devDiscover;
    }

    for( lIdx = 0, loop = 0;
         lIdx < szdv && loop < pPortalInfo->devTotal;
         lIdx++ )
    {
      pDevice = (ag_device_t*)pPortalInfo->pDevList[lIdx];
      if( pDevice )
      {
        loop++;
        pDevice->pDevHandle = 0; // mark for availability in pCard->pDevList[]
        // don't erase more as the device is scheduled for removal on DPC
      }
      AGTIAPI_PRINTK( "agtiapi_ResetCard: reset pDev %p pDevList %p idx %d\n",
                      pDevice, pPortalInfo->pDevList, lIdx );
      pPortalInfo->devTotal = pPortalInfo->devPrev = 0;
    }

    for( lIdx = 0; lIdx < maxTargets; lIdx++ )
    { // we reconstruct dev list later in get dev handle
      pPortalInfo->pDevList[lIdx] = NULL;
    }

    for( loop = 0; loop < AGTIAPI_LOOP_MAX; loop++ )
    {
      AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortStart entry data "
                      "%p / %d / %p\n",
                      &pCard->tiRoot,
                      pPortalInfo->portID,
                      &pPortalInfo->tiPortalContext );
      if( tiCOMPortStart( &pCard->tiRoot,
                          pPortalInfo->portID,
                          &pPortalInfo->tiPortalContext,
                          0 )
          != tiSuccess )
      {
        printf( "agtiapi_ResetCard: tiCOMPortStart %d FAILED\n",
                pPortalInfo->portID );
      }
      else
      {
        AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortStart %d success\n",
                        pPortalInfo->portID );
        break;
      }
    }
    AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags );
    tiCOMGetPortInfo( &pCard->tiRoot,
                      &pPortalInfo->tiPortalContext,
                      &pPortalInfo->tiPortInfo );
    pPortalData++;
  }

// ## fail case:  pCard->flags &= ~AGTIAPI_INSTALLED;

  AG_SPIN_LOCK_IRQ(agtiapi_host_lock, *flags);
  if( !(pCard->flags & AGTIAPI_INSTALLED) ) // driver not installed !
  {
    printf( "agtiapi_ResetCard: error, driver not intstalled? "
            "!AGTIAPI_INSTALLED \n" );
    return AGTIAPI_FAIL;
  }

  AGTIAPI_PRINTK( "agtiapi_ResetCard: total device %d\n", pCard->tgtCount );

#ifdef LOGEVENT
  agtiapi_LogEvent( pCard, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0,
                    "Reset initiator total device = %d!", pCard->tgtCount );
#endif
  pCard->resetCount++;

  AGTIAPI_PRINTK( "agtiapi_ResetCard: clear send and done queues\n" );
  // clear send & done queue
  AG_LOCAL_LOCK( &pCard->sendLock );
  pCard->ccbSendHead = NULL;
  pCard->ccbSendTail = NULL;
  AG_LOCAL_UNLOCK( &pCard->sendLock );

  AG_LOCAL_LOCK( &pCard->doneLock );
  pCard->ccbDoneHead = NULL;
  pCard->ccbDoneTail = NULL;
  AG_LOCAL_UNLOCK( &pCard->doneLock );

  // clear smp queues also
  AG_LOCAL_LOCK( &pCard->sendSMPLock );
  pCard->smpSendHead = NULL;
  pCard->smpSendTail = NULL;
  AG_LOCAL_UNLOCK( &pCard->sendSMPLock );

  AG_LOCAL_LOCK( &pCard->doneSMPLock );
  pCard->smpDoneHead = NULL;
  pCard->smpDoneTail = NULL;
  AG_LOCAL_UNLOCK( &pCard->doneSMPLock );

  // finished with all reset stuff, now start things back up
  tiCOMSystemInterruptsActive( &pCard->tiRoot, TRUE );
  pCard->flags |= AGTIAPI_SYS_INTR_ON;
  pCard->flags |= AGTIAPI_HAD_RESET;
  pCard->flags &= ~AGTIAPI_RESET;  // ##
  agtiapi_StartIO( pCard );
  AGTIAPI_PRINTK( "agtiapi_ResetCard: local return success\n" );
  return AGTIAPI_SUCCESS;
} // agtiapi_ResetCard

/******************************************************************************
agtiapi_ReleaseHBA()

Purpose:
  Releases
all resources previously acquired to support a specific Host Adapter, including the I/O Address range, and unregisters the agtiapi Host Adapter. Parameters: device_t dev (IN) - device pointer Return: always return 0 - success Note: ******************************************************************************/ int agtiapi_ReleaseHBA( device_t dev ) { int thisCard = device_get_unit( dev ); // keeping get_unit call to once int i; ag_card_info_t *thisCardInst = &agCardInfoList[ thisCard ]; struct ccb_setasync csa; struct agtiapi_softc *pCard; pCard = device_get_softc( dev ); ag_card_info_t *pCardInfo = pCard->pCardInfo; ag_resource_info_t *pRscInfo = &thisCardInst->tiRscInfo; AG_GLOBAL_ARG(flags); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: start\n" ); if (thisCardInst != pCardInfo) { AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: Wrong ag_card_info_t thisCardInst %p " "pCardInfo %p\n", thisCardInst, pCardInfo ); panic( "agtiapi_ReleaseHBA: Wrong ag_card_info_t thisCardInst %p pCardInfo " "%p\n", thisCardInst, pCardInfo ); return( EIO ); } AGTIAPI_PRINTK( "agtiapi_ReleaseHBA card %p\n", pCard ); pCard->flags |= AGTIAPI_SHUT_DOWN; // remove timer if (pCard->flags & AGTIAPI_TIMER_ON) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); callout_drain( &pCard->OS_timer ); callout_drain( &pCard->devRmTimer ); callout_drain(&pCard->IO_timer); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: timer released\n" ); } #ifdef HIALEAH_ENCRYPTION //Release encryption table memory - Fix it //if(pCard->encrypt && (pCard->flags & AGTIAPI_INSTALLED)) //agtiapi_CleanupEncryption(pCard); #endif /* * Shutdown the channel so that chip gets frozen * and it does not do any more pci-bus accesses. 
*/ if (pCard->flags & AGTIAPI_SYS_INTR_ON) { tiCOMSystemInterruptsActive( &pCard->tiRoot, FALSE ); pCard->flags &= ~AGTIAPI_SYS_INTR_ON; AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: card interrupt off\n" ); } if (pCard->flags & AGTIAPI_INSTALLED) { tiCOMShutDown( &pCard->tiRoot ); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: low layers shutdown\n" ); } /* * first release IRQ, so that we do not get any more interrupts * from this host */ if (pCard->flags & AGTIAPI_IRQ_REQUESTED) { if (!agtiapi_intx_mode) { int i; for (i = 0; i< MAX_MSIX_NUM_VECTOR; i++) { if (pCard->irq[i] != agNULL && pCard->rscID[i] != 0) { bus_teardown_intr(dev, pCard->irq[i], pCard->intrcookie[i]); bus_release_resource( dev, SYS_RES_IRQ, pCard->rscID[i], pCard->irq[i] ); } } pci_release_msi(dev); } pCard->flags &= ~AGTIAPI_IRQ_REQUESTED; #ifdef AGTIAPI_DPC for (i = 0; i < MAX_MSIX_NUM_DPC; i++) tasklet_kill(&pCard->tasklet_dpc[i]); #endif AGTIAPI_PRINTK("agtiapi_ReleaseHBA: IRQ released\n"); } // release memory vs. alloc in agtiapi_alloc_ostimem; used in ostiAllocMemory if( pCard->osti_busaddr != 0 ) { bus_dmamap_unload( pCard->osti_dmat, pCard->osti_mapp ); } if( pCard->osti_mem != NULL ) { bus_dmamem_free( pCard->osti_dmat, pCard->osti_mem, pCard->osti_mapp ); } if( pCard->osti_dmat != NULL ) { bus_dma_tag_destroy( pCard->osti_dmat ); } /* unmap the mapped PCI memory */ /* calls bus_release_resource( ,SYS_RES_MEMORY, ..) 
*/ agtiapi_ReleasePCIMem(thisCardInst); /* release all ccbs */ if (pCard->ccbTotal) { //calls bus_dmamap_destroy() for all pccbs agtiapi_ReleaseCCBs(pCard); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: CCB released\n"); } #ifdef HIALEAH_ENCRYPTION /*release encryption resources - Fix it*/ if(pCard->encrypt) { /*Check that all IO's are completed */ if(atomic_read (&outstanding_encrypted_io_count) > 0) { printf("%s: WARNING: %d outstanding encrypted IOs !\n", __FUNCTION__, atomic_read(&outstanding_encrypted_io_count)); } //agtiapi_CleanupEncryptionPools(pCard); } #endif /* release device list */ if( pCard->pDevList ) { free((caddr_t)pCard->pDevList, M_PMC_MDVT); pCard->pDevList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: device list released\n"); } #ifdef LINUX_PERBI_SUPPORT // ## review use of PERBI AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: WWN list %p \n", pCard->pWWNList ); if( pCard->pWWNList ) { free( (caddr_t)pCard->pWWNList, M_PMC_MTGT ); pCard->pWWNList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: WWN list released\n"); } if( pCard->pSLRList ) { free( (caddr_t)pCard->pSLRList, M_PMC_MSLR ); pCard->pSLRList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: SAS Local Remote list released\n"); } #endif if (pCard->pPortalData) { free((caddr_t)pCard->pPortalData, M_PMC_MPRT); pCard->pPortalData = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: PortalData released\n"); } //calls contigfree() or free() agtiapi_MemFree(pCardInfo); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: low level resource released\n"); #ifdef HOTPLUG_SUPPORT if (pCard->flags & AGTIAPI_PORT_INITIALIZED) { // agtiapi_FreeDevWorkList(pCard); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: (HP dev) work resources released\n"); } #endif /* * TBD, scsi_unregister may release wrong host data structure * which cause NULL pointer shows up. 
*/ if (pCard->flags & AGTIAPI_SCSI_REGISTERED) { pCard->flags &= ~AGTIAPI_SCSI_REGISTERED; #ifdef AGTIAPI_LOCAL_LOCK if (pCard->STLock) { //destroy mtx int maxLocks; maxLocks = pRscInfo->tiLoLevelResource.loLevelOption.numOfQueuesPerPort; for( i = 0; i < maxLocks; i++ ) { mtx_destroy(&pCard->STLock[i]); } free(pCard->STLock, M_PMC_MSTL); pCard->STLock = NULL; } #endif } ag_card_good--; /* reset agtiapi_1st_time if this is the only card */ if (!ag_card_good && !agtiapi_1st_time) { agtiapi_1st_time = 1; } /* for tiSgl_t memeory */ if (pCard->tisgl_busaddr != 0) { bus_dmamap_unload(pCard->tisgl_dmat, pCard->tisgl_map); } if (pCard->tisgl_mem != NULL) { bus_dmamem_free(pCard->tisgl_dmat, pCard->tisgl_mem, pCard->tisgl_map); } if (pCard->tisgl_dmat != NULL) { bus_dma_tag_destroy(pCard->tisgl_dmat); } if (pCard->buffer_dmat != agNULL) { bus_dma_tag_destroy(pCard->buffer_dmat); } if (pCard->sim != NULL) { mtx_lock(&thisCardInst->pmIOLock); xpt_setup_ccb(&csa.ccb_h, pCard->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = agtiapi_async; csa.callback_arg = pCard; xpt_action((union ccb *)&csa); xpt_free_path(pCard->path); // if (pCard->ccbTotal == 0) if (pCard->ccbTotal <= thisCard) { /* no link up so that simq has not been released. In order to remove cam, we call this. 
*/ xpt_release_simq(pCard->sim, 1); } xpt_bus_deregister(cam_sim_path(pCard->sim)); cam_sim_free(pCard->sim, FALSE); mtx_unlock(&thisCardInst->pmIOLock); } if (pCard->devq != NULL) { cam_simq_free(pCard->devq); } //destroy mtx mtx_destroy( &thisCardInst->pmIOLock ); mtx_destroy( &pCard->sendLock ); mtx_destroy( &pCard->doneLock ); mtx_destroy( &pCard->sendSMPLock ); mtx_destroy( &pCard->doneSMPLock ); mtx_destroy( &pCard->ccbLock ); mtx_destroy( &pCard->devListLock ); mtx_destroy( &pCard->OS_timer_lock ); mtx_destroy( &pCard->devRmTimerLock ); mtx_destroy( &pCard->memLock ); mtx_destroy( &pCard->freezeLock ); destroy_dev( pCard->my_cdev ); memset((void *)pCardInfo, 0, sizeof(ag_card_info_t)); return 0; } // Called during system shutdown after sync static int agtiapi_shutdown( device_t dev ) { AGTIAPI_PRINTK( "agtiapi_shutdown\n" ); return( 0 ); } static int agtiapi_suspend( device_t dev ) // Device suspend routine. { AGTIAPI_PRINTK( "agtiapi_suspend\n" ); return( 0 ); } static int agtiapi_resume( device_t dev ) // Device resume routine. 
{ AGTIAPI_PRINTK( "agtiapi_resume\n" ); return( 0 ); } static device_method_t agtiapi_methods[] = { // Device interface DEVMETHOD( device_probe, agtiapi_probe ), DEVMETHOD( device_attach, agtiapi_attach ), DEVMETHOD( device_detach, agtiapi_ReleaseHBA ), DEVMETHOD( device_shutdown, agtiapi_shutdown ), DEVMETHOD( device_suspend, agtiapi_suspend ), DEVMETHOD( device_resume, agtiapi_resume ), { 0, 0 } }; static devclass_t pmspcv_devclass; static driver_t pmspcv_driver = { "pmspcv", agtiapi_methods, sizeof( struct agtiapi_softc ) }; DRIVER_MODULE( pmspcv, pci, pmspcv_driver, pmspcv_devclass, 0, 0 ); MODULE_DEPEND( pmspcv, cam, 1, 1, 1 ); MODULE_DEPEND( pmspcv, pci, 1, 1, 1 ); #include #include #include #include Index: head/sys/dev/qlxgbe/ql_hw.c =================================================================== --- head/sys/dev/qlxgbe/ql_hw.c (revision 359440) +++ head/sys/dev/qlxgbe/ql_hw.c (revision 359441) @@ -1,5692 +1,5692 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_hw.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. * Content: Contains Hardware dependent functions */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" #include "ql_minidump.h" /* * Static Functions */ static void qla_del_rcv_cntxt(qla_host_t *ha); static int qla_init_rcv_cntxt(qla_host_t *ha); static int qla_del_xmt_cntxt(qla_host_t *ha); static int qla_init_xmt_cntxt(qla_host_t *ha); static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, uint32_t create); static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, int rcv); static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr); static int qla_hw_add_all_mcast(qla_host_t *ha); static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds); static int qla_init_nic_func(qla_host_t *ha); static int qla_stop_nic_func(qla_host_t *ha); static int qla_query_fw_dcbx_caps(qla_host_t *ha); static int 
qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits); static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode); static int qla_get_cam_search_mode(qla_host_t *ha); static void ql_minidump_free(qla_host_t *ha); #ifdef QL_DBG static void qla_stop_pegs(qla_host_t *ha) { uint32_t val = 1; ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); } static int qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { qla_stop_pegs(ha); QLA_UNLOCK(ha, __func__); } } return err; } #endif /* #ifdef QL_DBG */ static int qla_validate_set_port_cfg_bit(uint32_t bits) { if ((bits & 0xF) > 1) return (-1); if (((bits >> 4) & 0xF) > 2) return (-1); if (((bits >> 8) & 0xF) > 2) return (-1); return (0); } static int qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; uint32_t cfg_bits; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { err = qla_get_port_config(ha, &cfg_bits); if (err) goto qla_sysctl_set_port_cfg_exit; if (ret & 0x1) { cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; } else { cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; } ret = ret >> 4; cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; if ((ret & 0xF) == 0) { cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; } else if ((ret & 0xF) == 1){ cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; } else { cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; } ret = ret >> 4; cfg_bits &= 
~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; if (ret == 0) { cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; } else if (ret == 1){ cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; } else { cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; } if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_set_port_config(ha, cfg_bits); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } else { if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_get_port_config(ha, &cfg_bits); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } qla_sysctl_set_port_cfg_exit: return err; } static int qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_set_cam_search_mode(ha, (uint32_t)ret); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } } else { device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); } return (err); } static int qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); ha = (qla_host_t *)arg1; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { err = qla_get_cam_search_mode(ha); QLA_UNLOCK(ha, __func__); } else { device_printf(ha->pci_dev, "%s: failed\n", __func__); } return (err); } static void qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, 
OID_AUTO, "stats_hw_mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_mac"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_frames", CTLFLAG_RD, &ha->hw.mac.xmt_frames, "xmt_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_bytes", CTLFLAG_RD, &ha->hw.mac.xmt_bytes, "xmt_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_mcast_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts, "xmt_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_bcast_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts, "xmt_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pause_frames", CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames, "xmt_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_cntrl_pkts", CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts, "xmt_cntrl_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_64bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes, "xmt_pkt_lt_64bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_127bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes, "xmt_pkt_lt_127bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_255bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes, "xmt_pkt_lt_255bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_511bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes, "xmt_pkt_lt_511bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_1023bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes, "xmt_pkt_lt_1023bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_lt_1518bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes, "xmt_pkt_lt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "xmt_pkt_gt_1518bytes", CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes, "xmt_pkt_gt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_frames", CTLFLAG_RD, &ha->hw.mac.rcv_frames, "rcv_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_bytes", CTLFLAG_RD, &ha->hw.mac.rcv_bytes, "rcv_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, 
"rcv_mcast_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts, "rcv_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_bcast_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts, "rcv_bcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pause_frames", CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames, "rcv_pause_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_cntrl_pkts", CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts, "rcv_cntrl_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_64bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes, "rcv_pkt_lt_64bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_127bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes, "rcv_pkt_lt_127bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_255bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes, "rcv_pkt_lt_255bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_511bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes, "rcv_pkt_lt_511bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_1023bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes, "rcv_pkt_lt_1023bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_lt_1518bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes, "rcv_pkt_lt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_pkt_gt_1518bytes", CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes, "rcv_pkt_gt_1518bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_error", CTLFLAG_RD, &ha->hw.mac.rcv_len_error, "rcv_len_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_small", CTLFLAG_RD, &ha->hw.mac.rcv_len_small, "rcv_len_small"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_len_large", CTLFLAG_RD, &ha->hw.mac.rcv_len_large, "rcv_len_large"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_jabber", CTLFLAG_RD, &ha->hw.mac.rcv_jabber, "rcv_jabber"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rcv_dropped", CTLFLAG_RD, &ha->hw.mac.rcv_dropped, "rcv_dropped"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "fcs_error", CTLFLAG_RD, 
&ha->hw.mac.fcs_error, "fcs_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "align_error", CTLFLAG_RD, &ha->hw.mac.align_error, "align_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_frames, "eswitched_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_bytes", CTLFLAG_RD, &ha->hw.mac.eswitched_bytes, "eswitched_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_mcast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames, "eswitched_mcast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_bcast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames, "eswitched_bcast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_ucast_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames, "eswitched_ucast_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_err_free_frames", CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames, "eswitched_err_free_frames"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "eswitched_err_free_bytes", CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes, "eswitched_err_free_bytes"); return; } static void qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *ctx_oid; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_rcv"); children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "total_bytes", CTLFLAG_RD, &ha->hw.rcv.total_bytes, "total_bytes"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "total_pkts", CTLFLAG_RD, &ha->hw.rcv.total_pkts, "total_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_pkt_count", CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count, "lro_pkt_count"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "sw_pkt_count", CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count, "sw_pkt_count"); 
SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "ip_chksum_err", CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err, "ip_chksum_err"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_wo_acntxts", CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts, "pkts_wo_acntxts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_sds_card", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card, "pkts_dropped_no_sds_card"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_sds_host", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host, "pkts_dropped_no_sds_host"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "oversized_pkts", CTLFLAG_RD, &ha->hw.rcv.oversized_pkts, "oversized_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_dropped_no_rds", CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds, "pkts_dropped_no_rds"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "unxpctd_mcast_pkts", CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts, "unxpctd_mcast_pkts"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "re1_fbq_error", CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error, "re1_fbq_error"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "invalid_mac_addr", CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr, "invalid_mac_addr"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rds_prime_trys", CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys, "rds_prime_trys"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "rds_prime_success", CTLFLAG_RD, &ha->hw.rcv.rds_prime_success, "rds_prime_success"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_added", CTLFLAG_RD, &ha->hw.rcv.lro_flows_added, "lro_flows_added"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_deleted", CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted, "lro_flows_deleted"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "lro_flows_active", CTLFLAG_RD, &ha->hw.rcv.lro_flows_active, "lro_flows_active"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_droped_unknown", CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown, "pkts_droped_unknown"); SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, "pkts_cnt_oversized", CTLFLAG_RD, 
&ha->hw.rcv.pkts_cnt_oversized, "pkts_cnt_oversized"); return; } static void qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_hw_xmt"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); /* Tx Related */ SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "total_bytes", CTLFLAG_RD, &ha->hw.xmt[i].total_bytes, "total_bytes"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "total_pkts", CTLFLAG_RD, &ha->hw.xmt[i].total_pkts, "total_pkts"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "errors", CTLFLAG_RD, &ha->hw.xmt[i].errors, "errors"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "pkts_dropped", CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped, "pkts_dropped"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "switch_pkts", CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts, "switch_pkts"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "num_buffers", CTLFLAG_RD, &ha->hw.xmt[i].num_buffers, "num_buffers"); } return; } static void qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *node_children; ctx = device_get_sysctl_ctx(ha->pci_dev); node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_lt_200ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0], "mbx_completion_time_lt_200ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, 
"mbx_completion_time_200ms_400ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1], "mbx_completion_time_200ms_400ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_400ms_600ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2], "mbx_completion_time_400ms_600ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_600ms_800ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3], "mbx_completion_time_600ms_800ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_800ms_1000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4], "mbx_completion_time_800ms_1000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1000ms_1200ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5], "mbx_completion_time_1000ms_1200ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1200ms_1400ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6], "mbx_completion_time_1200ms_1400ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1400ms_1600ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7], "mbx_completion_time_1400ms_1600ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1600ms_1800ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8], "mbx_completion_time_1600ms_1800ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_1800ms_2000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9], "mbx_completion_time_1800ms_2000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2000ms_2200ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10], "mbx_completion_time_2000ms_2200ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2200ms_2400ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11], "mbx_completion_time_2200ms_2400ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2400ms_2600ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12], "mbx_completion_time_2400ms_2600ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2600ms_2800ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13], 
"mbx_completion_time_2600ms_2800ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_2800ms_3000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14], "mbx_completion_time_2800ms_3000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_3000ms_4000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15], "mbx_completion_time_3000ms_4000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_time_4000ms_5000ms", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16], "mbx_completion_time_4000ms_5000ms"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17], "mbx_completion_host_mbx_cntrl_timeout"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout", CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18], "mbx_completion_fw_mbx_cntrl_timeout"); return; } static void qlnx_add_hw_stats_sysctls(qla_host_t *ha) { qlnx_add_hw_mac_stats_sysctls(ha); qlnx_add_hw_rcv_stats_sysctls(ha); qlnx_add_hw_xmt_stats_sysctls(ha); qlnx_add_hw_mbx_cmpl_stats_sysctls(ha); return; } static void qlnx_add_drvr_sds_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_sds"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "intr_count", CTLFLAG_RD, &ha->hw.sds[i].intr_count, "intr_count"); SYSCTL_ADD_UINT(ctx, 
node_children, OID_AUTO, "rx_free", CTLFLAG_RD, &ha->hw.sds[i].rx_free, ha->hw.sds[i].rx_free, "rx_free"); } return; } static void qlnx_add_drvr_rds_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_rds"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_rds_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "count", CTLFLAG_RD, &ha->hw.rds[i].count, "count"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_pkt_count", CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count, "lro_pkt_count"); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "lro_bytes", CTLFLAG_RD, &ha->hw.rds[i].lro_bytes, "lro_bytes"); } return; } static void qlnx_add_drvr_tx_stats(qla_host_t *ha) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid_list *node_children; struct sysctl_oid *ctx_oid; int i; uint8_t name_str[16]; ctx = device_get_sysctl_ctx(ha->pci_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "stats_drvr_xmt"); children = SYSCTL_CHILDREN(ctx_oid); for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); snprintf(name_str, sizeof(name_str), "%d", i); ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); node_children = 
SYSCTL_CHILDREN(ctx_oid); SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "count", CTLFLAG_RD, &ha->tx_ring[i].count, "count"); #ifdef QL_ENABLE_ISCSI_TLV SYSCTL_ADD_QUAD(ctx, node_children, OID_AUTO, "iscsi_pkt_count", CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count, "iscsi_pkt_count"); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } return; } static void qlnx_add_drvr_stats_sysctls(qla_host_t *ha) { qlnx_add_drvr_sds_stats(ha); qlnx_add_drvr_rds_stats(ha); qlnx_add_drvr_tx_stats(ha); return; } /* * Name: ql_hw_add_sysctls * Function: Add P3Plus specific sysctls */ void ql_hw_add_sysctls(qla_host_t *ha) { device_t dev; dev = ha->pci_dev; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, ha->hw.num_tx_rings, "Number of Transmit Rings"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, ha->txr_idx, "Tx Ring Used"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); ha->hw.sds_cidx_thres = 32; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres, ha->hw.sds_cidx_thres, "Number of SDS entries to process before updating" " SDS Ring Consumer Index"); ha->hw.rds_pidx_thres = 32; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, ha->hw.rds_pidx_thres, "Number of Rcv Rings Entries to post before updating" " RDS Ring Producer Index"); ha->hw.rcv_intr_coalesce = (3 << 16) | 256; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, &ha->hw.rcv_intr_coalesce, ha->hw.rcv_intr_coalesce, "Rcv Intr Coalescing Parameters\n" "\tbits 15:0 max packets\n" "\tbits 31:16 max micro-seconds to wait\n" "\tplease run\n" "\tifconfig down && ifconfig up\n" "\tto take effect \n"); ha->hw.xmt_intr_coalesce = (64 << 16) | 64; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, &ha->hw.xmt_intr_coalesce, ha->hw.xmt_intr_coalesce, "Xmt Intr Coalescing Parameters\n" "\tbits 15:0 max packets\n" "\tbits 31:16 max micro-seconds to wait\n" "\tplease run\n" "\tifconfig down && ifconfig up\n" "\tto take effect \n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_port_cfg, "I", "Set Port Configuration if values below " "otherwise Get Port Configuration\n" "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" " 1 = xmt only; 2 = rcv only;\n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_set_cam_search_mode, "I", "Set CAM Search Mode" "\t 1 = search mode internal\n" "\t 2 = search mode auto\n"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, 
qla_sysctl_get_cam_search_mode, "I", "Get CAM Search Mode" "\t 1 = search mode internal\n" "\t 2 = search mode auto\n"); ha->hw.enable_9kb = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); ha->hw.enable_hw_lro = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro, ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n" "\t 1 : Hardware LRO if LRO is enabled\n" "\t 0 : Software LRO if LRO is enabled\n" "\t Any change requires ifconfig down/up to take effect\n" "\t Note that LRO may be turned off/on via ifconfig\n"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index, ha->hw.sp_log_index, "sp_log_index"); SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop, ha->hw.sp_log_stop, "sp_log_stop"); ha->hw.sp_log_stop_events = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sp_log_stop_events", CTLFLAG_RW, &ha->hw.sp_log_stop_events, ha->hw.sp_log_stop_events, "Slow path event log is stopped" " when OR of the following events occur \n" "\t 0x01 : Heart beat Failure\n" "\t 0x02 : Temperature Failure\n" "\t 0x04 : HW Initialization Failure\n" "\t 0x08 : Interface Initialization Failure\n" "\t 0x10 : Error Recovery Failure\n"); ha->hw.mdump_active = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, ha->hw.mdump_active, "Minidump retrieval is Active"); ha->hw.mdump_done = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 
OID_AUTO, "mdump_done", CTLFLAG_RW, &ha->hw.mdump_done, ha->hw.mdump_done, "Minidump has been done and available for retrieval"); ha->hw.mdump_capture_mask = 0xF; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "minidump_capture_mask", CTLFLAG_RW, &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask, "Minidump capture mask"); #ifdef QL_DBG ha->err_inject = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "err_inject", CTLFLAG_RW, &ha->err_inject, ha->err_inject, "Error to be injected\n" "\t\t\t 0: No Errors\n" "\t\t\t 1: rcv: rxb struct invalid\n" "\t\t\t 2: rcv: mp == NULL\n" "\t\t\t 3: lro: rxb struct invalid\n" "\t\t\t 4: lro: mp == NULL\n" "\t\t\t 5: rcv: num handles invalid\n" "\t\t\t 6: reg: indirect reg rd_wr failure\n" "\t\t\t 7: ocm: offchip memory rd_wr failure\n" "\t\t\t 8: mbx: mailbox command failure\n" "\t\t\t 9: heartbeat failure\n" "\t\t\t A: temperature failure\n" "\t\t\t 11: m_getcl or m_getjcl failure\n" "\t\t\t 13: Invalid Descriptor Count in SGL Receive\n" "\t\t\t 14: Invalid Descriptor Count in LRO Receive\n" "\t\t\t 15: peer port error recovery failure\n" "\t\t\t 16: tx_buf[next_prod_index].mbuf != NULL\n" ); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, (void *)ha, 0, qla_sysctl_stop_pegs, "I", "Peg Stop"); #endif /* #ifdef QL_DBG */ ha->hw.user_pri_nic = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, ha->hw.user_pri_nic, "VLAN Tag User Priority for Normal Ethernet Packets"); ha->hw.user_pri_iscsi = 4; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, ha->hw.user_pri_iscsi, "VLAN Tag User Priority for iSCSI Packets"); 
qlnx_add_hw_stats_sysctls(ha); qlnx_add_drvr_stats_sysctls(ha); return; } void ql_hw_link_status(qla_host_t *ha) { device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); if (ha->hw.link_up) { device_printf(ha->pci_dev, "link Up\n"); } else { device_printf(ha->pci_dev, "link Down\n"); } if (ha->hw.fduplex) { device_printf(ha->pci_dev, "Full Duplex\n"); } else { device_printf(ha->pci_dev, "Half Duplex\n"); } if (ha->hw.autoneg) { device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); } else { device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); } switch (ha->hw.link_speed) { case 0x710: device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); break; case 0x3E8: device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); break; case 0x64: device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); break; default: device_printf(ha->pci_dev, "link speed\t\t Unknown\n"); break; } switch (ha->hw.module_type) { case 0x01: device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n"); break; case 0x02: device_printf(ha->pci_dev, "Module Type 10GBase-LR\n"); break; case 0x03: device_printf(ha->pci_dev, "Module Type 10GBase-SR\n"); break; case 0x04: device_printf(ha->pci_dev, "Module Type 10GE Passive Copper(Compliant)[%d m]\n", ha->hw.cable_length); break; case 0x05: device_printf(ha->pci_dev, "Module Type 10GE Active" " Limiting Copper(Compliant)[%d m]\n", ha->hw.cable_length); break; case 0x06: device_printf(ha->pci_dev, "Module Type 10GE Passive Copper" " (Legacy, Best Effort)[%d m]\n", ha->hw.cable_length); break; case 0x07: device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); break; case 0x08: device_printf(ha->pci_dev, "Module Type 1000Base-LX\n"); break; case 0x09: device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); break; case 0x0A: device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); break; case 0x0B: device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" "(Legacy, Best Effort)\n"); break; default: device_printf(ha->pci_dev, "Unknown Module 
Type 0x%x\n", ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 *	Each ring class (SDS, RDS, Tx) is released only if its
 *	corresponding dma_buf.flags bit is set, and that bit is cleared
 *	afterwards so a second call is a no-op for that class.
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

	/* Status Descriptor rings, one buffer per SDS ring. */
	if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

	/* Receive Descriptor rings, one buffer per RDS ring. */
	if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

	/* All Tx rings share a single DMA buffer (see ql_alloc_dma). */
	if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
	ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
	device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	/* One contiguous buffer holds every Tx ring plus, in the extra
	 * PAGE_SIZE tail, the per-ring consumer-index words. */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

	if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto ql_alloc_dma_exit;
	}

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	/* Carve the buffer into per-ring descriptor arrays. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	/* The consumer-index words follow the descriptor arrays. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

	ha->hw.dma_buf.flags.tx_ring = 1;
QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n", __func__, (void *)(hw->dma_buf.tx_ring.dma_addr), hw->dma_buf.tx_ring.dma_b)); /* * Allocate Receive Descriptor Rings */ for (i = 0; i < hw->num_rds_rings; i++) { hw->dma_buf.rds_ring[i].alignment = 8; hw->dma_buf.rds_ring[i].size = (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) { device_printf(dev, "%s: rds ring[%d] alloc failed\n", __func__, i); for (j = 0; j < i; j++) ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]); goto ql_alloc_dma_exit; } QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n", __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr), hw->dma_buf.rds_ring[i].dma_b)); } hw->dma_buf.flags.rds_ring = 1; /* * Allocate Status Descriptor Rings */ for (i = 0; i < hw->num_sds_rings; i++) { hw->dma_buf.sds_ring[i].alignment = 8; hw->dma_buf.sds_ring[i].size = (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) { device_printf(dev, "%s: sds ring alloc failed\n", __func__); for (j = 0; j < i; j++) ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]); goto ql_alloc_dma_exit; } QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n", __func__, i, (void *)(hw->dma_buf.sds_ring[i].dma_addr), hw->dma_buf.sds_ring[i].dma_b)); } for (i = 0; i < hw->num_sds_rings; i++) { hw->sds[i].sds_ring_base = (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; } hw->dma_buf.flags.sds_ring = 1; return 0; ql_alloc_dma_exit: ql_free_dma(ha); return -1; } #define Q8_MBX_MSEC_DELAY 5000 static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause) { uint32_t i; uint32_t data; int ret = 0; uint64_t start_usecs; uint64_t end_usecs; uint64_t msecs_200; ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]); if (ha->offline || ha->qla_initiate_recovery) { ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0); goto exit_qla_mbx_cmd; } if 
(((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) && (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))|| !(ha->err_inject & ~0xFFFF))) { ret = -3; QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } start_usecs = qla_get_usec_timestamp(); if (no_pause) i = 1000; else i = Q8_MBX_MSEC_DELAY; while (i) { if (ha->qla_initiate_recovery) { ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); return (-1); } data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); if (data == 0) break; if (no_pause) { DELAY(1000); } else { qla_mdelay(__func__, 1); } i--; } if (i == 0) { device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n", __func__, data); ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0); ret = -1; ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++; QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } for (i = 0; i < n_hmbox; i++) { WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); h_mbox++; } WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); i = Q8_MBX_MSEC_DELAY; while (i) { if (ha->qla_initiate_recovery) { ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); return (-1); } data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); if ((data & 0x3) == 1) { data = READ_REG32(ha, Q8_FW_MBOX0); if ((data & 0xF000) != 0x8000) break; } if (no_pause) { DELAY(1000); } else { qla_mdelay(__func__, 1); } i--; } if (i == 0) { device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", __func__, data); ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0); ret = -2; ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++; QL_INITIATE_RECOVERY(ha); goto exit_qla_mbx_cmd; } for (i = 0; i < n_fwmbox; i++) { if (ha->qla_initiate_recovery) { ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); return (-1); } *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); } WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); end_usecs = qla_get_usec_timestamp(); if (end_usecs > start_usecs) { msecs_200 = (end_usecs - start_usecs)/(1000 * 200); if (msecs_200 < 15) 
ha->hw.mbx_comp_msecs[msecs_200]++;
		else if (msecs_200 < 20)
			ha->hw.mbx_comp_msecs[15]++;
		else {
			/* NOTE(review): start/end/msecs_200 are uint64_t but are
			 * printed with %ld — format specifiers should be fixed-width
			 * (PRIu64); confirm against the ha->hw field types. */
			device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__,
				start_usecs, end_usecs, msecs_200);
			ha->hw.mbx_comp_msecs[16]++;
		}
	}
	/* NOTE(review): fw_mbox was post-incremented in the copy loop above,
	 * so fw_mbox[0..4] here reads past the values just copied — verify
	 * this trace log is intended to dump the words following them. */
	ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3],
		fw_mbox[4]);

exit_qla_mbx_cmd:

	return (ret);
}

/*
 * Name: qla_get_nic_partition
 * Function: issues the GET_NIC_PARTITION mailbox command and, via the
 *	optional out-parameters, reports whether 9KB receive buffers are
 *	supported and how many receive queues the partition has.
 * Returns: 0 on success, -1 on mailbox failure or bad status.
 */
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
	uint32_t *num_rcvq)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	/* Completion status is carried in the top bits of mbox[0]. */
	err = mbox[0] >> 25;

	if (supports_9kb != NULL) {
		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
			*supports_9kb = 1;
		else
			*supports_9kb = 0;
	}

	if (num_rcvq != NULL)
		*num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

/*
 * Name: qla_config_intr_cntxt
 * Function: creates or deletes num_intrs MSI-X interrupt contexts
 *	starting at start_idx, via the CONFIG_INTR mailbox command.
 *	create != 0 requests creation; otherwise the previously stored
 *	interrupt ids are deleted.
 */
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
	uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			/* +1: msix vector 0 is reserved (legacy/default). */
			c_intr->intr[i].msix_index = start_idx + 1 + i;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index =
				ha->hw.intr_id[(start_idx + i)];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
device_printf(dev, "%s: %s failed0\n", __func__, (create ? "create" : "delete")); return (-1); } c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); if (err) { device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__, (create ? "create" : "delete"), err, c_intr_rsp->nentries); for (i = 0; i < c_intr_rsp->nentries; i++) { device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n", __func__, i, c_intr_rsp->intr[i].status, c_intr_rsp->intr[i].intr_id, c_intr_rsp->intr[i].intr_src); } return (-1); } for (i = 0; ((i < num_intrs) && create); i++) { if (!c_intr_rsp->intr[i].status) { ha->hw.intr_id[(start_idx + i)] = c_intr_rsp->intr[i].intr_id; ha->hw.intr_src[(start_idx + i)] = c_intr_rsp->intr[i].intr_src; } } return (0); } /* * Name: qla_config_rss * Function: Configure RSS for the context/interface. */ static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id) { q80_config_rss_t *c_rss; q80_config_rss_rsp_t *c_rss_rsp; uint32_t err, i; device_t dev = ha->pci_dev; c_rss = (q80_config_rss_t *)ha->hw.mbox; bzero(c_rss, (sizeof (q80_config_rss_t))); c_rss->opcode = Q8_MBX_CONFIG_RSS; c_rss->count_version = (sizeof (q80_config_rss_t) >> 2); c_rss->count_version |= Q8_MBX_CMD_VERSION; c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP | Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP); //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP | // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP); c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS; c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE; c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK; c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID; c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS; c_rss->cntxt_id = cntxt_id; for (i = 0; i < 5; i++) { c_rss->rss_key[i] = rss_key[i]; } if (qla_mbx_cmd(ha, (uint32_t *)c_rss, (sizeof (q80_config_rss_t) >> 2), ha->hw.mbox, 
(sizeof(q80_config_rss_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } static int qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count, uint16_t cntxt_id, uint8_t *ind_table) { q80_config_rss_ind_table_t *c_rss_ind; q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp; uint32_t err; device_t dev = ha->pci_dev; if ((count > Q8_RSS_IND_TBL_SIZE) || ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) { device_printf(dev, "%s: illegal count [%d, %d]\n", __func__, start_idx, count); return (-1); } c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox; bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t)); c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE; c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2); c_rss_ind->count_version |= Q8_MBX_CMD_VERSION; c_rss_ind->start_idx = start_idx; c_rss_ind->end_idx = start_idx + count - 1; c_rss_ind->cntxt_id = cntxt_id; bcopy(ind_table, c_rss_ind->ind_table, count); if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind, (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox, (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } /* * Name: qla_config_intr_coalesce * Function: Configure Interrupt Coalescing. 
*/ static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, int rcv) { q80_config_intr_coalesc_t *intrc; q80_config_intr_coalesc_rsp_t *intrc_rsp; uint32_t err, i; device_t dev = ha->pci_dev; intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox; bzero(intrc, (sizeof (q80_config_intr_coalesc_t))); intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE; intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2); intrc->count_version |= Q8_MBX_CMD_VERSION; if (rcv) { intrc->flags = Q8_MBX_INTRC_FLAGS_RCV; intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF; intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF; } else { intrc->flags = Q8_MBX_INTRC_FLAGS_XMT; intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF; intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF; } intrc->cntxt_id = cntxt_id; if (tenable) { intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC; intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC; for (i = 0; i < ha->hw.num_sds_rings; i++) { intrc->sds_ring_mask |= (1 << i); } intrc->ms_timeout = 1000; } if (qla_mbx_cmd(ha, (uint32_t *)intrc, (sizeof (q80_config_intr_coalesc_t) >> 2), ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } /* * Name: qla_config_mac_addr * Function: binds a MAC address to the context/interface. * Can be unicast, multicast or broadcast. */ static int qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac, uint32_t num_mac) { q80_config_mac_addr_t *cmac; q80_config_mac_addr_rsp_t *cmac_rsp; uint32_t err; device_t dev = ha->pci_dev; int i; uint8_t *mac_cpy = mac_addr; if (num_mac > Q8_MAX_MAC_ADDRS) { device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n", __func__, (add_mac ? 
"Add" : "Del"), num_mac); return (-1); } cmac = (q80_config_mac_addr_t *)ha->hw.mbox; bzero(cmac, (sizeof (q80_config_mac_addr_t))); cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR; cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2; cmac->count_version |= Q8_MBX_CMD_VERSION; if (add_mac) cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR; else cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR; cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS; cmac->nmac_entries = num_mac; cmac->cntxt_id = ha->hw.rcv_cntxt_id; for (i = 0; i < num_mac; i++) { bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN); mac_addr = mac_addr + ETHER_ADDR_LEN; } if (qla_mbx_cmd(ha, (uint32_t *)cmac, (sizeof (q80_config_mac_addr_t) >> 2), ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) { device_printf(dev, "%s: %s failed0\n", __func__, (add_mac ? "Add" : "Del")); return (-1); } cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status); if (err) { device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__, (add_mac ? "Add" : "Del"), err); for (i = 0; i < num_mac; i++) { device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2], mac_cpy[3], mac_cpy[4], mac_cpy[5]); mac_cpy += ETHER_ADDR_LEN; } return (-1); } return 0; } /* * Name: qla_set_mac_rcv_mode * Function: Enable/Disable AllMulticast and Promiscous Modes. 
*/ static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode) { q80_config_mac_rcv_mode_t *rcv_mode; uint32_t err; q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp; device_t dev = ha->pci_dev; rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox; bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t))); rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE; rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2; rcv_mode->count_version |= Q8_MBX_CMD_VERSION; rcv_mode->mode = mode; rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode, (sizeof (q80_config_mac_rcv_mode_t) >> 2), ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return 0; } int ql_set_promisc(qla_host_t *ha) { int ret; ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE; ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); return (ret); } void qla_reset_promisc(qla_host_t *ha) { ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE; (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); } int ql_set_allmulti(qla_host_t *ha) { int ret; ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE; ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); return (ret); } void qla_reset_allmulti(qla_host_t *ha) { ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE; (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); } /* * Name: ql_set_max_mtu * Function: * Sets the maximum transfer unit size for the specified rcv context. 
*/ int ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) { device_t dev; q80_set_max_mtu_t *max_mtu; q80_set_max_mtu_rsp_t *max_mtu_rsp; uint32_t err; dev = ha->pci_dev; max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox; bzero(max_mtu, (sizeof (q80_set_max_mtu_t))); max_mtu->opcode = Q8_MBX_SET_MAX_MTU; max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2); max_mtu->count_version |= Q8_MBX_CMD_VERSION; max_mtu->cntxt_id = cntxt_id; max_mtu->mtu = mtu; if (qla_mbx_cmd(ha, (uint32_t *)max_mtu, (sizeof (q80_set_max_mtu_t) >> 2), ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) { device_printf(dev, "%s: failed\n", __func__); return -1; } max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id) { device_t dev; q80_link_event_t *lnk; q80_link_event_rsp_t *lnk_rsp; uint32_t err; dev = ha->pci_dev; lnk = (q80_link_event_t *)ha->hw.mbox; bzero(lnk, (sizeof (q80_link_event_t))); lnk->opcode = Q8_MBX_LINK_EVENT_REQ; lnk->count_version = (sizeof (q80_link_event_t) >> 2); lnk->count_version |= Q8_MBX_CMD_VERSION; lnk->cntxt_id = cntxt_id; lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC; if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2), ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id) { device_t dev; q80_config_fw_lro_t *fw_lro; q80_config_fw_lro_rsp_t *fw_lro_rsp; uint32_t err; dev = ha->pci_dev; fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox; bzero(fw_lro, sizeof(q80_config_fw_lro_t)); fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO; fw_lro->count_version 
= (sizeof (q80_config_fw_lro_t) >> 2); fw_lro->count_version |= Q8_MBX_CMD_VERSION; fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK; fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK; fw_lro->cntxt_id = cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)fw_lro, (sizeof (q80_config_fw_lro_t) >> 2), ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode) { device_t dev; q80_hw_config_t *hw_config; q80_hw_config_rsp_t *hw_config_rsp; uint32_t err; dev = ha->pci_dev; hw_config = (q80_hw_config_t *)ha->hw.mbox; bzero(hw_config, sizeof (q80_hw_config_t)); hw_config->opcode = Q8_MBX_HW_CONFIG; hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT; hw_config->count_version |= Q8_MBX_CMD_VERSION; hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE; hw_config->u.set_cam_search_mode.mode = search_mode; if (qla_mbx_cmd(ha, (uint32_t *)hw_config, (sizeof (q80_hw_config_t) >> 2), ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_get_cam_search_mode(qla_host_t *ha) { device_t dev; q80_hw_config_t *hw_config; q80_hw_config_rsp_t *hw_config_rsp; uint32_t err; dev = ha->pci_dev; hw_config = (q80_hw_config_t *)ha->hw.mbox; bzero(hw_config, sizeof (q80_hw_config_t)); hw_config->opcode = Q8_MBX_HW_CONFIG; hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT; hw_config->count_version |= Q8_MBX_CMD_VERSION; hw_config->cmd = 
Q8_HW_CONFIG_GET_CAM_SEARCH_MODE; if (qla_mbx_cmd(ha, (uint32_t *)hw_config, (sizeof (q80_hw_config_t) >> 2), ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } else { device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__, hw_config_rsp->u.get_cam_search_mode.mode); } return 0; } static int qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) { device_t dev; q80_get_stats_t *stat; q80_get_stats_rsp_t *stat_rsp; uint32_t err; dev = ha->pci_dev; stat = (q80_get_stats_t *)ha->hw.mbox; bzero(stat, (sizeof (q80_get_stats_t))); stat->opcode = Q8_MBX_GET_STATS; stat->count_version = 2; stat->count_version |= Q8_MBX_CMD_VERSION; stat->cmd = cmd; if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, ha->hw.mbox, (rsp_size >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); if (err) { return -1; } return 0; } void ql_get_stats(qla_host_t *ha) { q80_get_stats_rsp_t *stat_rsp; q80_mac_stats_t *mstat; q80_xmt_stats_t *xstat; q80_rcv_stats_t *rstat; uint32_t cmd; int i; struct ifnet *ifp = ha->ifp; if (ifp == NULL) return; if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return; } if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { QLA_UNLOCK(ha, __func__); return; } stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; /* * Get MAC Statistics */ cmd = Q8_GET_STATS_CMD_TYPE_MAC; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= ((ha->pci_func & 0x1) << 16); if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { mstat = (q80_mac_stats_t 
*)&stat_rsp->u.mac; bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t)); } else { device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } /* * Get RCV Statistics */ cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= (ha->hw.rcv_cntxt_id << 16); if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t)); } else { device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; /* * Get XMT Statistics */ for (i = 0 ; (i < ha->hw.num_tx_rings); i++) { if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || ha->offline) goto ql_get_stats_exit; cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; // cmd |= Q8_GET_STATS_CMD_CLEAR; cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) == 0) { xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t)); } else { device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", __func__, ha->hw.mbox[0]); } } ql_get_stats_exit: QLA_UNLOCK(ha, __func__); return; } /* * Name: qla_tx_tso * Function: Checks if the packet to be transmitted is a candidate for * Large TCP Segment Offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. 
*/ static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) { struct ether_vlan_header *eh; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; uint16_t etype, opcode, offload = 1; device_t dev; dev = ha->pci_dev; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } hdrlen = 0; switch (etype) { case ETHERTYPE_IP: tcp_opt_off = ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip = (struct ip *)(hdr + ehdrlen); } else { ip = (struct ip *)(mp->m_data + ehdrlen); } ip_hlen = ip->ip_hl << 2; opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; if ((ip->ip_p != IPPROTO_TCP) || (ip_hlen != sizeof (struct ip))){ /* IP Options are not supported */ offload = 0; } else th = (struct tcphdr *)((caddr_t)ip + ip_hlen); break; case ETHERTYPE_IPV6: tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + sizeof (struct tcphdr); if (mp->m_len < tcp_opt_off) { m_copydata(mp, 0, tcp_opt_off, hdr); ip6 = (struct ip6_hdr *)(hdr + ehdrlen); } else { ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); } ip_hlen = sizeof(struct ip6_hdr); opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; if (ip6->ip6_nxt != IPPROTO_TCP) { //device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } else th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); break; default: QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__)); offload = 0; break; } if (!offload) return (-1); tcp_hlen = th->th_off << 2; hdrlen = ehdrlen + ip_hlen + tcp_hlen; if (mp->m_len < hdrlen) { if (mp->m_len < tcp_opt_off) { if (tcp_hlen > sizeof(struct tcphdr)) { m_copydata(mp, tcp_opt_off, (tcp_hlen - sizeof(struct tcphdr)), &hdr[tcp_opt_off]); } } else { m_copydata(mp, 0, hdrlen, hdr); } } tx_cmd->mss = 
mp->m_pkthdr.tso_segsz; tx_cmd->flags_opcode = opcode ; tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; tx_cmd->total_hdr_len = hdrlen; /* Check for Multicast least significant bit of MSB == 1 */ if (eh->evl_dhost[0] & 0x01) { tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; } if (mp->m_len < hdrlen) { printf("%d\n", hdrlen); return (1); } return (0); } /* * Name: qla_tx_chksum * Function: Checks if the packet to be transmitted is a candidate for * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx * Ring Structure are plugged in. */ static int qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code, uint32_t *tcp_hdr_off) { struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; uint32_t ehdrlen, ip_hlen; uint16_t etype, opcode, offload = 1; device_t dev; uint8_t buf[sizeof(struct ip6_hdr)]; dev = ha->pci_dev; *op_code = 0; if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0) return (-1); eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = sizeof (struct ip); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof(struct ip), buf); ip = (struct ip *)buf; } if (ip->ip_p == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; else if (ip->ip_p == IPPROTO_UDP) opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; else { //device_printf(dev, "%s: ipv4\n", __func__); offload = 0; } break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); if (mp->m_len < (ehdrlen + ip_hlen)) { m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } if (ip6->ip6_nxt == IPPROTO_TCP) opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; else if (ip6->ip6_nxt == IPPROTO_UDP) opcode = 
Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; else { //device_printf(dev, "%s: ipv6\n", __func__); offload = 0; } break; default: offload = 0; break; } if (!offload) return (-1); *op_code = opcode; *tcp_hdr_off = (ip_hlen + ehdrlen); return (0); } #define QLA_TX_MIN_FREE 2 /* * Name: ql_hw_send * Function: Transmits a packet. It first checks if the packet is a * candidate for Large TCP Segment Offload and then for UDP/TCP checksum * offload. If either of these creteria are not met, it is transmitted * as a regular ethernet frame. */ int ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu) { struct ether_vlan_header *eh; qla_hw_t *hw = &ha->hw; q80_tx_cmd_t *tx_cmd, tso_cmd; bus_dma_segment_t *c_seg; uint32_t num_tx_cmds, hdr_len = 0; uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next; device_t dev; int i, ret; uint8_t *src = NULL, *dst = NULL; uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; uint32_t op_code = 0; uint32_t tcp_hdr_off = 0; dev = ha->pci_dev; /* * Always make sure there is atleast one empty slot in the tx_ring * tx_ring is considered full when there only one entry available */ num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; total_length = mp->m_pkthdr.len; if (total_length > QLA_MAX_TSO_FRAME_SIZE) { device_printf(dev, "%s: total length exceeds maxlen(%d)\n", __func__, total_length); return (EINVAL); } eh = mtod(mp, struct ether_vlan_header *); if (mp->m_pkthdr.csum_flags & CSUM_TSO) { bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); src = frame_hdr; ret = qla_tx_tso(ha, mp, &tso_cmd, src); if (!(ret & ~1)) { /* find the additional tx_cmd descriptors required */ if (mp->m_flags & M_VLANTAG) tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN; hdr_len = tso_cmd.total_hdr_len; bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); num_tx_cmds++; hdr_len -= bytes; while (hdr_len) { bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); hdr_len -= bytes; 
num_tx_cmds++; } hdr_len = tso_cmd.total_hdr_len; if (ret == 0) src = (uint8_t *)eh; } else return (EINVAL); } else { (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off); } if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { ql_hw_tx_done_locked(ha, txr_idx); if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= " "(num_tx_cmds + QLA_TX_MIN_FREE))\n", __func__)); return (-1); } } for (i = 0; i < num_tx_cmds; i++) { int j; j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1); if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) { QL_ASSERT(ha, 0, \ ("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\ __func__, __LINE__, txr_idx, j,\ ha->tx_ring[txr_idx].tx_buf[j].m_head)); return (EINVAL); } } tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx]; if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) { if (nsegs > ha->hw.max_tx_segs) ha->hw.max_tx_segs = nsegs; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); if (op_code) { tx_cmd->flags_opcode = op_code; tx_cmd->tcp_hdr_off = tcp_hdr_off; } else { tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; } } else { bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); ha->tx_tso_frames++; } if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; if (iscsi_pdu) eh->evl_tag |= ha->hw.user_pri_iscsi << 13; } else if (mp->m_flags & M_VLANTAG) { if (hdr_len) { /* TSO */ tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | Q8_TX_CMD_FLAGS_HW_VLAN_ID); tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN; } else tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID; ha->hw_vlan_tx_frames++; tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; if (iscsi_pdu) { tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13; mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci; } } tx_cmd->n_bufs = (uint8_t)nsegs; tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); tx_cmd->cntxtid = 
Q8_TX_CMD_PORT_CNXTID(ha->pci_func); c_seg = segs; while (1) { for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { switch (i) { case 0: tx_cmd->buf1_addr = c_seg->ds_addr; tx_cmd->buf1_len = c_seg->ds_len; break; case 1: tx_cmd->buf2_addr = c_seg->ds_addr; tx_cmd->buf2_len = c_seg->ds_len; break; case 2: tx_cmd->buf3_addr = c_seg->ds_addr; tx_cmd->buf3_len = c_seg->ds_len; break; case 3: tx_cmd->buf4_addr = c_seg->ds_addr; tx_cmd->buf4_len = c_seg->ds_len; break; } c_seg++; nsegs--; } txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; if (!nsegs) break; tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); } if (mp->m_pkthdr.csum_flags & CSUM_TSO) { /* TSO : Copy the header in the following tx cmd descriptors */ txr_next = hw->tx_cntxt[txr_idx].txr_next; tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; bytes = QL_MIN(bytes, hdr_len); dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; if (mp->m_flags & M_VLANTAG) { /* first copy the src/dst MAC addresses */ bcopy(src, dst, (ETHER_ADDR_LEN * 2)); dst += (ETHER_ADDR_LEN * 2); src += (ETHER_ADDR_LEN * 2); *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); dst += 2; *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag); dst += 2; /* bytes left in src header */ hdr_len -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); /* bytes left in TxCmd Entry */ bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } else { bcopy(src, dst, bytes); src += bytes; hdr_len -= bytes; } txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; while (hdr_len) { tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); bytes = QL_MIN((sizeof(q80_tx_cmd_t)), 
hdr_len); bcopy(src, tx_cmd, bytes); src += bytes; hdr_len -= bytes; txr_next = hw->tx_cntxt[txr_idx].txr_next = (hw->tx_cntxt[txr_idx].txr_next + 1) & (NUM_TX_DESCRIPTORS - 1); tx_cmd_count++; } } hw->tx_cntxt[txr_idx].txr_free = hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ txr_idx); QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); return (0); } #define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ static int qla_config_rss_ind_table(qla_host_t *ha) { uint32_t i, count; uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { rss_ind_tbl[i] = i % ha->hw.num_sds_rings; } for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + Q8_CONFIG_IND_TBL_SIZE) { if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; } else { count = Q8_CONFIG_IND_TBL_SIZE; } if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, rss_ind_tbl)) return (-1); } return (0); } static int qla_config_soft_lro(qla_host_t *ha) { int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; bzero(lro, sizeof(struct lro_ctrl)); #if (__FreeBSD_version >= 1100101) if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) { device_printf(ha->pci_dev, "%s: tcp_lro_init_args [%d] failed\n", __func__, i); return (-1); } #else if (tcp_lro_init(lro)) { device_printf(ha->pci_dev, "%s: tcp_lro_init [%d] failed\n", __func__, i); return (-1); } #endif /* #if (__FreeBSD_version >= 1100101) */ lro->ifp = ha->ifp; } QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__)); return (0); } static void qla_drain_soft_lro(qla_host_t *ha) { int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; #if (__FreeBSD_version >= 1100101) tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((!SLIST_EMPTY(&lro->lro_active))) { 
queued = SLIST_FIRST(&lro->lro_active); SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif /* #if (__FreeBSD_version >= 1100101) */ } return; } static void qla_free_soft_lro(qla_host_t *ha) { int i; qla_hw_t *hw = &ha->hw; struct lro_ctrl *lro; for (i = 0; i < hw->num_sds_rings; i++) { lro = &hw->sds[i].lro; tcp_lro_free(lro); } return; } /* * Name: ql_del_hw_if * Function: Destroys the hardware specific entities corresponding to an * Ethernet Interface */ void ql_del_hw_if(qla_host_t *ha) { uint32_t i; uint32_t num_msix; (void)qla_stop_nic_func(ha); qla_del_rcv_cntxt(ha); if(qla_del_xmt_cntxt(ha)) goto ql_del_hw_if_exit; if (ha->hw.flags.init_intr_cnxt) { for (i = 0; i < ha->hw.num_sds_rings; ) { if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) num_msix = Q8_MAX_INTR_VECTORS; else num_msix = ha->hw.num_sds_rings - i; if (qla_config_intr_cntxt(ha, i, num_msix, 0)) break; i += num_msix; } ha->hw.flags.init_intr_cnxt = 0; } ql_del_hw_if_exit: if (ha->hw.enable_soft_lro) { qla_drain_soft_lro(ha); qla_free_soft_lro(ha); } return; } void qla_confirm_9kb_enable(qla_host_t *ha) { // uint32_t supports_9kb = 0; ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); #if 0 qla_get_nic_partition(ha, &supports_9kb, NULL); if (!supports_9kb) #endif ha->hw.enable_9kb = 0; return; } /* * Name: ql_init_hw_if * Function: Creates the hardware specific entities corresponding to an * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address * corresponding to the interface. Enables LRO if allowed. 
*/ int ql_init_hw_if(qla_host_t *ha) { device_t dev; uint32_t i; uint8_t bcast_mac[6]; qla_rdesc_t *rdesc; uint32_t num_msix; dev = ha->pci_dev; for (i = 0; i < ha->hw.num_sds_rings; i++) { bzero(ha->hw.dma_buf.sds_ring[i].dma_b, ha->hw.dma_buf.sds_ring[i].size); } for (i = 0; i < ha->hw.num_sds_rings; ) { if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) num_msix = Q8_MAX_INTR_VECTORS; else num_msix = ha->hw.num_sds_rings - i; if (qla_config_intr_cntxt(ha, i, num_msix, 1)) { if (i > 0) { num_msix = i; for (i = 0; i < num_msix; ) { qla_config_intr_cntxt(ha, i, Q8_MAX_INTR_VECTORS, 0); i += Q8_MAX_INTR_VECTORS; } } return (-1); } i = i + num_msix; } ha->hw.flags.init_intr_cnxt = 1; /* * Create Receive Context */ if (qla_init_rcv_cntxt(ha)) { return (-1); } for (i = 0; i < ha->hw.num_rds_rings; i++) { rdesc = &ha->hw.rds[i]; rdesc->rx_next = NUM_RX_DESCRIPTORS - 2; rdesc->rx_in = 0; /* Update the RDS Producer Indices */ QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\ rdesc->rx_next); } /* * Create Transmit Context */ if (qla_init_xmt_cntxt(ha)) { qla_del_rcv_cntxt(ha); return (-1); } ha->hw.max_tx_segs = 0; if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1)) return(-1); ha->hw.flags.unicast_mac = 1; bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; if (qla_config_mac_addr(ha, bcast_mac, 1, 1)) return (-1); ha->hw.flags.bcast_mac = 1; /* * program any cached multicast addresses */ if (qla_hw_add_all_mcast(ha)) return (-1); if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id)) return (-1); if (qla_config_rss(ha, ha->hw.rcv_cntxt_id)) return (-1); if (qla_config_rss_ind_table(ha)) return (-1); if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) return (-1); if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) return (-1); if (ha->ifp->if_capenable & IFCAP_LRO) { if (ha->hw.enable_hw_lro) { ha->hw.enable_soft_lro = 0; if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) return 
(-1); } else { ha->hw.enable_soft_lro = 1; if (qla_config_soft_lro(ha)) return (-1); } } if (qla_init_nic_func(ha)) return (-1); if (qla_query_fw_dcbx_caps(ha)) return (-1); for (i = 0; i < ha->hw.num_sds_rings; i++) QL_ENABLE_INTERRUPTS(ha, i); return (0); } static int qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) { device_t dev = ha->pci_dev; q80_rq_map_sds_to_rds_t *map_rings; q80_rsp_map_sds_to_rds_t *map_rings_rsp; uint32_t i, err; qla_hw_t *hw = &ha->hw; map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t)); map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); map_rings->count_version |= Q8_MBX_CMD_VERSION; map_rings->cntxt_id = hw->rcv_cntxt_id; map_rings->num_rings = num_idx; for (i = 0; i < num_idx; i++) { map_rings->sds_rds[i].sds_ring = i + start_idx; map_rings->sds_rds[i].rds_ring = i + start_idx; } if (qla_mbx_cmd(ha, (uint32_t *)map_rings, (sizeof (q80_rq_map_sds_to_rds_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return (0); } /* * Name: qla_init_rcv_cntxt * Function: Creates the Receive Context. 
*/ static int qla_init_rcv_cntxt(qla_host_t *ha) { q80_rq_rcv_cntxt_t *rcntxt; q80_rsp_rcv_cntxt_t *rcntxt_rsp; q80_stat_desc_t *sdesc; int i, j; qla_hw_t *hw = &ha->hw; device_t dev; uint32_t err; uint32_t rcntxt_sds_rings; uint32_t rcntxt_rds_rings; uint32_t max_idx; dev = ha->pci_dev; /* * Create Receive Context */ for (i = 0; i < hw->num_sds_rings; i++) { sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { sdesc->data[0] = 1ULL; sdesc->data[1] = 1ULL; } } rcntxt_sds_rings = hw->num_sds_rings; if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; rcntxt_rds_rings = hw->num_rds_rings; if (hw->num_rds_rings > MAX_RDS_RING_SETS) rcntxt_rds_rings = MAX_RDS_RING_SETS; rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); rcntxt->count_version |= Q8_MBX_CMD_VERSION; rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | Q8_RCV_CNTXT_CAP0_LRO | Q8_RCV_CNTXT_CAP0_HW_LRO | Q8_RCV_CNTXT_CAP0_RSS | Q8_RCV_CNTXT_CAP0_SGL_LRO; if (ha->hw.enable_9kb) rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; else rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; if (ha->hw.num_rds_rings > 1) { rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; } else rcntxt->nrds_sets_rings = 0x1 | (1 << 5); rcntxt->nsds_rings = rcntxt_sds_rings; rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; rcntxt->rcv_vpid = 0; for (i = 0; i < rcntxt_sds_rings; i++) { rcntxt->sds[i].paddr = qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); rcntxt->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; i < rcntxt_rds_rings; i++) { rcntxt->rds[i].paddr_std = qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); if 
(ha->hw.enable_9kb) rcntxt->rds[i].std_bsize = qla_host_to_le64(MJUM9BYTES); else rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); rcntxt->rds[i].std_nentries = qla_host_to_le32(NUM_RX_DESCRIPTORS); } if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, (sizeof (q80_rq_rcv_cntxt_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } for (i = 0; i < rcntxt_sds_rings; i++) { hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; } for (i = 0; i < rcntxt_rds_rings; i++) { hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; } hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; ha->hw.flags.init_rx_cnxt = 1; if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) max_idx = MAX_RCNTXT_SDS_RINGS; else max_idx = hw->num_sds_rings - i; err = qla_add_rcv_rings(ha, i, max_idx); if (err) return -1; i += max_idx; } } if (hw->num_rds_rings > 1) { for (i = 0; i < hw->num_rds_rings; ) { if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) max_idx = MAX_SDS_TO_RDS_MAP; else max_idx = hw->num_rds_rings - i; err = qla_map_sds_to_rds(ha, i, max_idx); if (err) return -1; i += max_idx; } } return (0); } static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) { device_t dev = ha->pci_dev; q80_rq_add_rcv_rings_t *add_rcv; q80_rsp_add_rcv_rings_t *add_rcv_rsp; uint32_t i,j, err; qla_hw_t *hw = &ha->hw; add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); add_rcv->count_version |= Q8_MBX_CMD_VERSION; add_rcv->nrds_sets_rings = nsds | (1 << 5); add_rcv->nsds_rings = nsds; add_rcv->cntxt_id = 
hw->rcv_cntxt_id; for (i = 0; i < nsds; i++) { j = i + sds_idx; add_rcv->sds[i].paddr = qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); add_rcv->sds[i].size = qla_host_to_le32(NUM_STATUS_DESCRIPTORS); add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); } for (i = 0; (i < nsds); i++) { j = i + sds_idx; add_rcv->rds[i].paddr_std = qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); if (ha->hw.enable_9kb) add_rcv->rds[i].std_bsize = qla_host_to_le64(MJUM9BYTES); else add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); add_rcv->rds[i].std_nentries = qla_host_to_le32(NUM_RX_DESCRIPTORS); } if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, (sizeof (q80_rq_add_rcv_rings_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } for (i = 0; i < nsds; i++) { hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; } for (i = 0; i < nsds; i++) { hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; } return (0); } /* * Name: qla_del_rcv_cntxt * Function: Destroys the Receive Context. 
*/
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;
	q80_rcv_cntxt_destroy_t *rcntxt;
	q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp;
	uint32_t err;
	uint8_t bcast_mac[6];

	/* Nothing to tear down if the receive context was never created. */
	if (!ha->hw.flags.init_rx_cnxt)
		return;

	/*
	 * Remove all programmed multicast filters first; bail out (leaving
	 * init_rx_cnxt set) if the firmware refuses, mirroring the early
	 * returns below.
	 */
	if (qla_hw_del_all_mcast(ha))
		return;

	/* Deregister the broadcast MAC filter if it was programmed. */
	if (ha->hw.flags.bcast_mac) {
		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
			return;
		ha->hw.flags.bcast_mac = 0;
	}

	/* Deregister the unicast (station) MAC filter if it was programmed. */
	if (ha->hw.flags.unicast_mac) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
			return;
		ha->hw.flags.unicast_mac = 0;
	}

	/*
	 * Build the DESTROY_RX_CNTXT mailbox command in the shared mailbox
	 * buffer; count_version encodes the request size in 32-bit words.
	 */
	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));

	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return;
	}
	rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		/* Log only; the context is considered gone either way below. */
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
	}

	ha->hw.flags.init_rx_cnxt = 0;
	return;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
*/ static int qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) { device_t dev; qla_hw_t *hw = &ha->hw; q80_rq_tx_cntxt_t *tcntxt; q80_rsp_tx_cntxt_t *tcntxt_rsp; uint32_t err; qla_hw_tx_cntxt_t *hw_tx_cntxt; uint32_t intr_idx; hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; dev = ha->pci_dev; /* * Create Transmit Context */ tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox; bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t))); tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT; tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); tcntxt->count_version |= Q8_MBX_CMD_VERSION; intr_idx = txr_idx; #ifdef QL_ENABLE_ISCSI_TLV tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | Q8_TX_CNTXT_CAP0_TC; if (txr_idx >= (ha->hw.num_tx_rings >> 1)) { tcntxt->traffic_class = 1; } intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1); #else tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ tcntxt->ntx_rings = 1; tcntxt->tx_ring[0].paddr = qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr); tcntxt->tx_ring[0].tx_consumer = qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]); tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; *(hw_tx_cntxt->tx_cons) = 0; if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, (sizeof (q80_rq_tx_cntxt_t) >> 2), ha->hw.mbox, (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return -1; } hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index; hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id; if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0)) return (-1); return (0); 
} /* * Name: qla_del_xmt_cntxt * Function: Destroys the Transmit Context. */ static int qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) { device_t dev = ha->pci_dev; q80_tx_cntxt_destroy_t *tcntxt; q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; uint32_t err; tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); tcntxt->count_version |= Q8_MBX_CMD_VERSION; tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, (sizeof (q80_tx_cntxt_destroy_t) >> 2), ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed0\n", __func__); return (-1); } tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); return (-1); } return (0); } static int qla_del_xmt_cntxt(qla_host_t *ha) { uint32_t i; int ret = 0; if (!ha->hw.flags.init_tx_cnxt) return (ret); for (i = 0; i < ha->hw.num_tx_rings; i++) { if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0) break; } ha->hw.flags.init_tx_cnxt = 0; return (ret); } static int qla_init_xmt_cntxt(qla_host_t *ha) { uint32_t i, j; for (i = 0; i < ha->hw.num_tx_rings; i++) { if (qla_init_xmt_cntxt_i(ha, i) != 0) { for (j = 0; j < i; j++) { if (qla_del_xmt_cntxt_i(ha, j)) break; } return (-1); } } ha->hw.flags.init_tx_cnxt = 1; return (0); } static int qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast) { int i, nmcast; uint32_t count = 0; uint8_t *mcast; nmcast = ha->hw.nmcast; QL_DPRINT2(ha, (ha->pci_dev, "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast)); mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { if ((ha->hw.mcast[i].addr[0] != 0) || (ha->hw.mcast[i].addr[1] != 0) || (ha->hw.mcast[i].addr[2] 
!= 0) || (ha->hw.mcast[i].addr[3] != 0) || (ha->hw.mcast[i].addr[4] != 0) || (ha->hw.mcast[i].addr[5] != 0)) { bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; device_printf(ha->pci_dev, "%s: %x:%x:%x:%x:%x:%x \n", __func__, ha->hw.mcast[i].addr[0], ha->hw.mcast[i].addr[1], ha->hw.mcast[i].addr[2], ha->hw.mcast[i].addr[3], ha->hw.mcast[i].addr[4], ha->hw.mcast[i].addr[5]); if (count == Q8_MAX_MAC_ADDRS) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } count = 0; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); } nmcast--; } } if (count) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } } QL_DPRINT2(ha, (ha->pci_dev, "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast)); return 0; } static int qla_hw_add_all_mcast(qla_host_t *ha) { int ret; ret = qla_hw_all_mcast(ha, 1); return (ret); } int qla_hw_del_all_mcast(qla_host_t *ha) { int ret; ret = qla_hw_all_mcast(ha, 0); bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS)); ha->hw.nmcast = 0; return (ret); } static int qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) return (0); /* its been already added */ } return (-1); } static int qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if ((ha->hw.mcast[i].addr[0] == 0) && (ha->hw.mcast[i].addr[1] == 0) && (ha->hw.mcast[i].addr[2] == 0) && (ha->hw.mcast[i].addr[3] == 0) && (ha->hw.mcast[i].addr[4] == 0) && (ha->hw.mcast[i].addr[5] == 0)) { bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); ha->hw.nmcast++; mta = mta + ETHER_ADDR_LEN; nmcast--; if (nmcast == 0) break; } } return 0; } static int 
qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) { int i; for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { ha->hw.mcast[i].addr[0] = 0; ha->hw.mcast[i].addr[1] = 0; ha->hw.mcast[i].addr[2] = 0; ha->hw.mcast[i].addr[3] = 0; ha->hw.mcast[i].addr[4] = 0; ha->hw.mcast[i].addr[5] = 0; ha->hw.nmcast--; mta = mta + ETHER_ADDR_LEN; nmcast--; if (nmcast == 0) break; } } return 0; } /* * Name: ql_hw_set_multi * Function: Sets the Multicast Addresses provided by the host O.S into the * hardware (for the given interface) */ int ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, uint32_t add_mac) { uint8_t *mta = mcast_addr; int i; int ret = 0; uint32_t count = 0; uint8_t *mcast; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); for (i = 0; i < mcnt; i++) { if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) { if (add_mac) { if (qla_hw_mac_addr_present(ha, mta) != 0) { bcopy(mta, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; } } else { if (qla_hw_mac_addr_present(ha, mta) == 0) { bcopy(mta, mcast, ETHER_ADDR_LEN); mcast = mcast + ETHER_ADDR_LEN; count++; } } } if (count == Q8_MAX_MAC_ADDRS) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } if (add_mac) { qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); } else { qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); } count = 0; mcast = ha->hw.mac_addr_arr; memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); } mta += Q8_MAC_ADDR_LEN; } if (count) { if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, count)) { device_printf(ha->pci_dev, "%s: failed\n", __func__); return (-1); } if (add_mac) { qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); } else { qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); } } return (ret); } /* * Name: ql_hw_tx_done_locked * Function: Handle Transmit Completions */ 
void ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) { qla_tx_buf_t *txb; qla_hw_t *hw = &ha->hw; uint32_t comp_idx, comp_count = 0; qla_hw_tx_cntxt_t *hw_tx_cntxt; hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; /* retrieve index of last entry in tx ring completed */ comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); while (comp_idx != hw_tx_cntxt->txr_comp) { txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; hw_tx_cntxt->txr_comp++; if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) hw_tx_cntxt->txr_comp = 0; comp_count++; if (txb->m_head) { if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1); bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; } } hw_tx_cntxt->txr_free += comp_count; if (hw_tx_cntxt->txr_free > NUM_TX_DESCRIPTORS) device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d txr_free = %d" "txr_next = %d txr_comp = %d\n", __func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp); QL_ASSERT(ha, (hw_tx_cntxt->txr_free <= NUM_TX_DESCRIPTORS), \ ("%s [%d]: txr_idx = %d txr_free = %d txr_next = %d txr_comp = %d\n",\ __func__, __LINE__, txr_idx, hw_tx_cntxt->txr_free, \ hw_tx_cntxt->txr_next, hw_tx_cntxt->txr_comp)); return; } void ql_update_link_state(qla_host_t *ha) { uint32_t link_state = 0; uint32_t prev_link_state; prev_link_state = ha->hw.link_up; if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) { link_state = READ_REG32(ha, Q8_LINK_STATE); if (ha->pci_func == 0) { link_state = (((link_state & 0xF) == 1)? 1 : 0); } else { link_state = ((((link_state >> 4)& 0xF) == 1)? 
1 : 0); } } atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state); if (prev_link_state != ha->hw.link_up) { if (ha->hw.link_up) { if_link_state_change(ha->ifp, LINK_STATE_UP); } else { if_link_state_change(ha->ifp, LINK_STATE_DOWN); } } return; } int ql_hw_check_health(qla_host_t *ha) { uint32_t val; ha->hw.health_count++; if (ha->hw.health_count < 500) return 0; ha->hw.health_count = 0; val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { device_printf(ha->pci_dev, "%s: Temperature Alert" " at ts_usecs %ld ts_reg = 0x%08x\n", __func__, qla_get_usec_timestamp(), val); if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE) ha->hw.sp_log_stop = -1; QL_INITIATE_RECOVERY(ha); return -1; } val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); if ((val != ha->hw.hbeat_value) && (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { ha->hw.hbeat_value = val; ha->hw.hbeat_failure = 0; return 0; } ha->hw.hbeat_failure++; if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1)) device_printf(ha->pci_dev, "%s: Heartbeat Failue 1[0x%08x]\n", __func__, val); if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */ return 0; else { uint32_t peg_halt_status1; uint32_t peg_halt_status2; peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1); peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2); device_printf(ha->pci_dev, "%s: Heartbeat Failue at ts_usecs = %ld " "fw_heart_beat = 0x%08x " "peg_halt_status1 = 0x%08x " "peg_halt_status2 = 0x%08x\n", __func__, qla_get_usec_timestamp(), val, peg_halt_status1, peg_halt_status2); if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE) ha->hw.sp_log_stop = -1; } QL_INITIATE_RECOVERY(ha); return -1; } static int qla_init_nic_func(qla_host_t *ha) { device_t dev; q80_init_nic_func_t *init_nic; q80_init_nic_func_rsp_t *init_nic_rsp; uint32_t err; dev = ha->pci_dev; init_nic = (q80_init_nic_func_t *)ha->hw.mbox; bzero(init_nic, 
sizeof(q80_init_nic_func_t)); init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); init_nic->count_version |= Q8_MBX_CMD_VERSION; init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; //qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); if (qla_mbx_cmd(ha, (uint32_t *)init_nic, (sizeof (q80_init_nic_func_t) >> 2), ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; // qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } else { device_printf(dev, "%s: successful\n", __func__); } return 0; } static int qla_stop_nic_func(qla_host_t *ha) { device_t dev; q80_stop_nic_func_t *stop_nic; q80_stop_nic_func_rsp_t *stop_nic_rsp; uint32_t err; dev = ha->pci_dev; stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; bzero(stop_nic, sizeof(q80_stop_nic_func_t)); stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); stop_nic->count_version |= Q8_MBX_CMD_VERSION; stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; //qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, (sizeof (q80_stop_nic_func_t) >> 2), ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; //qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t)); err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int 
qla_query_fw_dcbx_caps(qla_host_t *ha) { device_t dev; q80_query_fw_dcbx_caps_t *fw_dcbx; q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; uint32_t err; dev = ha->pci_dev; fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, (sizeof (q80_query_fw_dcbx_caps_t) >> 2), ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; ql_dump_buf8(ha, __func__, fw_dcbx_rsp, sizeof (q80_query_fw_dcbx_caps_rsp_t)); err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); } return 0; } static int qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, uint32_t aen_mb3, uint32_t aen_mb4) { device_t dev; q80_idc_ack_t *idc_ack; q80_idc_ack_rsp_t *idc_ack_rsp; uint32_t err; int count = 300; dev = ha->pci_dev; idc_ack = (q80_idc_ack_t *)ha->hw.mbox; bzero(idc_ack, sizeof(q80_idc_ack_t)); idc_ack->opcode = Q8_MBX_IDC_ACK; idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); idc_ack->count_version |= Q8_MBX_CMD_VERSION; idc_ack->aen_mb1 = aen_mb1; idc_ack->aen_mb2 = aen_mb2; idc_ack->aen_mb3 = aen_mb3; idc_ack->aen_mb4 = aen_mb4; ha->hw.imd_compl= 0; if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, (sizeof (q80_idc_ack_t) >> 2), ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } while (count && !ha->hw.imd_compl) { qla_mdelay(__func__, 100); 
count--; } if (!count) return -1; else device_printf(dev, "%s: count %d\n", __func__, count); return (0); } static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) { device_t dev; q80_set_port_cfg_t *pcfg; q80_set_port_cfg_rsp_t *pfg_rsp; uint32_t err; int count = 300; dev = ha->pci_dev; pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; bzero(pcfg, sizeof(q80_set_port_cfg_t)); pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); pcfg->count_version |= Q8_MBX_CMD_VERSION; pcfg->cfg_bits = cfg_bits; device_printf(dev, "%s: cfg_bits" " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" " [0x%x, 0x%x, 0x%x]\n", __func__, ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)); ha->hw.imd_compl= 0; if (qla_mbx_cmd(ha, (uint32_t *)pcfg, (sizeof (q80_set_port_cfg_t) >> 2), ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { while (count && !ha->hw.imd_compl) { qla_mdelay(__func__, 100); count--; } if (count) { device_printf(dev, "%s: count %d\n", __func__, count); err = 0; } } if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } return (0); } static int qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) { uint32_t err; device_t dev = ha->pci_dev; q80_config_md_templ_size_t *md_size; q80_config_md_templ_size_rsp_t *md_size_rsp; #ifndef QL_LDFLASH_FW ql_minidump_template_hdr_t *hdr; hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; *size = hdr->size_of_template; return (0); #endif /* #ifdef QL_LDFLASH_FW */ md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; bzero(md_size, sizeof(q80_config_md_templ_size_t)); md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; md_size->count_version = (sizeof 
(q80_config_md_templ_size_t) >> 2); md_size->count_version |= Q8_MBX_CMD_VERSION; if (qla_mbx_cmd(ha, (uint32_t *) md_size, (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return (-1); } md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } *size = md_size_rsp->templ_size; return (0); } static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) { device_t dev; q80_get_port_cfg_t *pcfg; q80_get_port_cfg_rsp_t *pcfg_rsp; uint32_t err; dev = ha->pci_dev; pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; bzero(pcfg, sizeof(q80_get_port_cfg_t)); pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); pcfg->count_version |= Q8_MBX_CMD_VERSION; if (qla_mbx_cmd(ha, (uint32_t *)pcfg, (sizeof (q80_get_port_cfg_t) >> 2), ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return -1; } pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return(-1); } device_printf(dev, "%s: [cfg_bits, port type]" " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" " [0x%x, 0x%x, 0x%x]\n", __func__, pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0) ); *cfg_bits = pcfg_rsp->cfg_bits; return (0); } int ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) { struct ether_vlan_header *eh; uint16_t etype; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct tcphdr *th = NULL; uint32_t hdrlen; uint32_t offset; uint8_t buf[sizeof(struct ip6_hdr)]; eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { hdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } if (etype == ETHERTYPE_IP) { offset = (hdrlen + sizeof (struct ip)); if (mp->m_len >= offset) { ip = (struct ip *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, sizeof (struct ip), buf); ip = (struct ip *)buf; } if (ip->ip_p == IPPROTO_TCP) { hdrlen += ip->ip_hl << 2; offset = hdrlen + 4; if (mp->m_len >= offset) { - th = (struct tcphdr *)(mp->m_data + hdrlen);; + th = (struct tcphdr *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, 4, buf); th = (struct tcphdr *)buf; } } } else if (etype == ETHERTYPE_IPV6) { offset = (hdrlen + sizeof (struct ip6_hdr)); if (mp->m_len >= offset) { ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); ip6 = (struct ip6_hdr *)buf; } if (ip6->ip6_nxt == IPPROTO_TCP) { hdrlen += sizeof(struct ip6_hdr); offset = hdrlen + 4; if (mp->m_len >= offset) { - th = (struct tcphdr *)(mp->m_data + hdrlen);; + th = (struct tcphdr *)(mp->m_data + hdrlen); } else { m_copydata(mp, hdrlen, 4, buf); th = (struct tcphdr *)buf; } } } if (th != NULL) { if ((th->th_sport == htons(3260)) || (th->th_dport == htons(3260))) return 0; } return (-1); } void qla_hw_async_event(qla_host_t *ha) { switch (ha->hw.aen_mb0) { case 0x8101: (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, ha->hw.aen_mb3, ha->hw.aen_mb4); break; default: break; } return; } #ifdef QL_LDFLASH_FW static int ql_get_minidump_template(qla_host_t *ha) { uint32_t err; device_t dev = 
ha->pci_dev; q80_config_md_templ_cmd_t *md_templ; q80_config_md_templ_cmd_rsp_t *md_templ_rsp; md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); md_templ->count_version |= Q8_MBX_CMD_VERSION; md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; md_templ->buff_size = ha->hw.dma_buf.minidump.size; if (qla_mbx_cmd(ha, (uint32_t *) md_templ, (sizeof(q80_config_md_templ_cmd_t) >> 2), ha->hw.mbox, (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { device_printf(dev, "%s: failed\n", __func__); return (-1); } md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); if (err) { device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); return (-1); } return (0); } #endif /* #ifdef QL_LDFLASH_FW */ /* * Minidump related functionality */ static int ql_parse_template(qla_host_t *ha); static uint32_t ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t *crb_entry, uint32_t * data_buff); static uint32_t ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, uint32_t * data_buff); static uint32_t ql_pollrd_modify_write(qla_host_t *ha, ql_minidump_entry_rd_modify_wr_with_poll_t *entry, uint32_t *data_buff); static uint32_t ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t * data_buff); static uint32_t ql_L1Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, uint32_t *data_buff); static uint32_t ql_rdocm(qla_host_t *ha, ql_minidump_entry_rdocm_t *ocmEntry, uint32_t *data_buff); static uint32_t ql_rdmem(qla_host_t *ha, ql_minidump_entry_rdmem_t *mem_entry, uint32_t *data_buff); static uint32_t ql_rdrom(qla_host_t *ha, ql_minidump_entry_rdrom_t *romEntry, uint32_t *data_buff); static uint32_t ql_rdmux(qla_host_t *ha, ql_minidump_entry_mux_t *muxEntry, uint32_t *data_buff); static uint32_t ql_rdmux2(qla_host_t *ha, 
ql_minidump_entry_mux2_t *muxEntry, uint32_t *data_buff);
static uint32_t ql_rdqueue(qla_host_t *ha, ql_minidump_entry_queue_t *queueEntry,
    uint32_t *data_buff);
static uint32_t ql_cntrl(qla_host_t *ha, ql_minidump_template_hdr_t *template_hdr,
    ql_minidump_entry_cntrl_t *crbEntry);

/*
 * Compute the required capture-buffer size by summing the template's
 * capture_size_array entries selected by the configured capture mask.
 * Bit k of mdump_capture_mask (starting at bit 1, i.e. 0x2) selects
 * capture_size_array[k].
 */
static uint32_t
ql_minidump_size(qla_host_t *ha)
{
    uint32_t i, k;
    uint32_t size = 0;
    ql_minidump_template_hdr_t *hdr;

    hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;

    i = 0x2;

    for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
        if (i & ha->hw.mdump_capture_mask)
            size += hdr->capture_size_array[k];
        i = i << 1;
    }
    return (size);
}

/* Release the minidump capture buffer; idempotent (NULL check). */
static void
ql_free_minidump_buffer(qla_host_t *ha)
{
    if (ha->hw.mdump_buffer != NULL) {
        free(ha->hw.mdump_buffer, M_QLA83XXBUF);
        ha->hw.mdump_buffer = NULL;
        ha->hw.mdump_buffer_size = 0;
    }
    return;
}

/*
 * Allocate the capture buffer sized by ql_minidump_size().
 * Returns 0 on success, -1 on zero size or allocation failure.
 * M_NOWAIT: may legitimately fail under memory pressure.
 */
static int
ql_alloc_minidump_buffer(qla_host_t *ha)
{
    ha->hw.mdump_buffer_size = ql_minidump_size(ha);

    if (!ha->hw.mdump_buffer_size)
        return (-1);

    ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
        M_NOWAIT);

    if (ha->hw.mdump_buffer == NULL)
        return (-1);

    return (0);
}

/* Release the template shadow buffer; idempotent (NULL check). */
static void
ql_free_minidump_template_buffer(qla_host_t *ha)
{
    if (ha->hw.mdump_template != NULL) {
        free(ha->hw.mdump_template, M_QLA83XXBUF);
        ha->hw.mdump_template = NULL;
        ha->hw.mdump_template_size = 0;
    }
    return;
}

/*
 * Allocate a host-side shadow of the minidump template (same size as
 * the DMA template buffer).  Returns 0 on success, -1 on failure.
 */
static int
ql_alloc_minidump_template_buffer(qla_host_t *ha)
{
    ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;

    ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
        M_QLA83XXBUF, M_NOWAIT);

    if (ha->hw.mdump_template == NULL)
        return (-1);

    return (0);
}

/*
 * Allocate both template-shadow and capture buffers; on capture-buffer
 * failure the template buffer is rolled back so no partial state remains.
 */
static int
ql_alloc_minidump_buffers(qla_host_t *ha)
{
    int ret;

    ret = ql_alloc_minidump_template_buffer(ha);

    if (ret)
        return (ret);

    ret = ql_alloc_minidump_buffer(ha);

    if (ret)
        ql_free_minidump_template_buffer(ha);

    return (ret);
}

/*
 * One's-complement checksum over the DMA'd template, as 32-bit words
 * accumulated into 64 bits then end-around-carry folded.  A valid
 * template yields 0 (the template embeds its own checksum word —
 * presumably; verify against firmware docs).
 */
static uint32_t
ql_validate_minidump_checksum(qla_host_t *ha)
{
    uint64_t sum = 0;
    int count;
    uint32_t *template_buff;

    count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
    template_buff = ha->hw.dma_buf.minidump.dma_b;

    while (count-- > 0) {
        sum += *template_buff++;
    }

    /* fold carries back in (end-around carry) */
    while (sum >> 32) {
        sum = (sum & 0xFFFFFFFF) + (sum >> 32);
    }

    return (~sum);
}

/*
 * One-time minidump setup: size and fetch the template (from firmware
 * via DMA when QL_LDFLASH_FW, otherwise from the built-in ql83xx_minidump
 * image), validate its checksum, and allocate working buffers.
 * Sets hw.mdump_init on success; frees everything on any failure.
 */
int
ql_minidump_init(qla_host_t *ha)
{
    int ret = 0;
    uint32_t template_size = 0;
    device_t dev = ha->pci_dev;

    /*
     * Get Minidump Template Size
     */
    ret = qla_get_minidump_tmplt_size(ha, &template_size);

    if (ret || (template_size == 0)) {
        device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
            template_size);
        return (-1);
    }

    /*
     * Allocate Memory for Minidump Template
     */
    ha->hw.dma_buf.minidump.alignment = 8;
    ha->hw.dma_buf.minidump.size = template_size;

#ifdef QL_LDFLASH_FW
    if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
        device_printf(dev, "%s: minidump dma alloc failed\n", __func__);

        return (-1);
    }
    ha->hw.dma_buf.flags.minidump = 1;

    /*
     * Retrieve Minidump Template
     */
    ret = ql_get_minidump_template(ha);
#else
    ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;

#endif /* #ifdef QL_LDFLASH_FW */

    if (ret == 0) {
        ret = ql_validate_minidump_checksum(ha);

        if (ret == 0) {
            ret = ql_alloc_minidump_buffers(ha);

            if (ret == 0)
                ha->hw.mdump_init = 1;
            else
                device_printf(dev,
                    "%s: ql_alloc_minidump_buffers"
                    " failed\n", __func__);
        } else {
            device_printf(dev, "%s: ql_validate_minidump_checksum"
                " failed\n", __func__);
        }
    } else {
        device_printf(dev, "%s: ql_get_minidump_template failed\n",
             __func__);
    }

    if (ret)
        ql_minidump_free(ha);

    return (ret);
}

/* Tear down all minidump state: DMA buffer (if owned) and host buffers. */
static void
ql_minidump_free(qla_host_t *ha)
{
    ha->hw.mdump_init = 0;
    if (ha->hw.dma_buf.flags.minidump) {
        ha->hw.dma_buf.flags.minidump = 0;
        ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
    }

    ql_free_minidump_template_buffer(ha);
    ql_free_minidump_buffer(ha);

    return;
}

/*
 * Capture a minidump: stop the firmware sequence, snapshot the template
 * into the host shadow, walk it with ql_parse_template(), then restart
 * the sequence.  One-shot per init (mdump_done latch).
 */
void
ql_minidump(qla_host_t *ha)
{
    if (!ha->hw.mdump_init)
        return;

    if (ha->hw.mdump_done)
        return;

    ha->hw.mdump_usec_ts = qla_get_usec_timestamp();
    ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);

    bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
    bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);

    bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
        ha->hw.mdump_template_size);

    ql_parse_template(ha);

    ql_start_sequence(ha, ha->hw.mdump_start_seq_index);

    ha->hw.mdump_done = 1;

    return;
}

/*
 * helper routines
 */

/*
 * Record a capture-size mismatch in the entry header so post-processing
 * tools can see the actual size and the size-error flag.
 */
static void
ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
{
    if (esize != entry->hdr.entry_capture_size) {
        entry->hdr.entry_capture_size = esize;
        entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
    }
    return;
}

/*
 * Walk the minidump template entry list, dispatching each entry type to
 * its capture helper and accumulating output into mdump_buffer.
 * Entries not selected by the capture mask, or that would overflow the
 * capture buffer, are marked QL_DBG_SKIPPED_FLAG and skipped.
 */
static int
ql_parse_template(qla_host_t *ha)
{
    uint32_t num_of_entries, buff_level, e_cnt, esize;
    uint32_t end_cnt, rv = 0;
    char *dump_buff, *dbuff;
    int sane_start = 0, sane_end = 0;
    ql_minidump_template_hdr_t *template_hdr;
    ql_minidump_entry_t *entry;
    uint32_t capture_mask;
    uint32_t dump_size;

    /* Setup parameters */
    template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;

    if (template_hdr->entry_type == TLHDR)
        sane_start = 1;

    dump_buff = (char *) ha->hw.mdump_buffer;

    num_of_entries = template_hdr->num_of_entries;

    entry = (ql_minidump_entry_t *) ((char *)template_hdr
            + template_hdr->first_entry_offset );

    /* publish per-function state the CNTRL entries may read back */
    template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
        template_hdr->ocm_window_array[ha->pci_func];
    template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;

    capture_mask = ha->hw.mdump_capture_mask;
    dump_size = ha->hw.mdump_buffer_size;

    template_hdr->driver_capture_mask = capture_mask;

    QL_DPRINT80(ha, (ha->pci_dev,
        "%s: sane_start = %d num_of_entries = %d "
        "capture_mask = 0x%x dump_size = %d \n",
        __func__, sane_start, num_of_entries, capture_mask, dump_size));

    for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {

        /*
         * If the capture_mask of the entry does not match capture mask
         * skip the entry after marking the driver_flags indicator.
         */

        if (!(entry->hdr.entry_capture_mask & capture_mask)) {
            entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
            entry = (ql_minidump_entry_t *) ((char *) entry
                    + entry->hdr.entry_size);
            continue;
        }

        /*
         * This is ONLY needed in implementations where
         * the capture buffer allocated is too small to capture
         * all of the required entries for a given capture mask.
         * We need to empty the buffer contents to a file
         * if possible, before processing the next entry
         * If the buff_full_flag is set, no further capture will happen
         * and all remaining non-control entries will be skipped.
         */
        if (entry->hdr.entry_capture_size != 0) {
            if ((buff_level + entry->hdr.entry_capture_size) >
                dump_size) {
                /*  Try to recover by emptying buffer to file */
                entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
                entry = (ql_minidump_entry_t *) ((char *) entry
                        + entry->hdr.entry_size);
                continue;
            }
        }

        /*
         * Decode the entry type and process it accordingly
         */
        switch (entry->hdr.entry_type) {
        case RDNOP:
            break;

        case RDEND:
            /* remember where the first RDEND was seen; >1 is a
             * template error reported after the loop */
            if (sane_end == 0) {
                end_cnt = e_cnt;
            }
            sane_end++;
            break;

        case RDCRB:
            dbuff = dump_buff + buff_level;
            esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case POLLRD:
            dbuff = dump_buff + buff_level;
            esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case POLLRDMWR:
            dbuff = dump_buff + buff_level;
            esize = ql_pollrd_modify_write(ha, (void *)entry,
                    (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case L2ITG:
        case L2DTG:
        case L2DAT:
        case L2INS:
            dbuff = dump_buff + buff_level;
            esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
            /* -1 == poll timeout inside ql_L2Cache: skip entry */
            if (esize == -1) {
                entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
            } else {
                ql_entry_err_chk(entry, esize);
                buff_level += esize;
            }
            break;

        case L1DAT:
        case L1INS:
            dbuff = dump_buff + buff_level;
            esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case RDOCM:
            dbuff = dump_buff + buff_level;
            esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case RDMEM:
            dbuff = dump_buff + buff_level;
            esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case BOARD:
        case RDROM:
            dbuff = dump_buff + buff_level;
            esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case RDMUX:
            dbuff = dump_buff + buff_level;
            esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case RDMUX2:
            dbuff = dump_buff + buff_level;
            esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case QUEUE:
            dbuff = dump_buff + buff_level;
            esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
            ql_entry_err_chk(entry, esize);
            buff_level += esize;
            break;

        case CNTRL:
            /* CNTRL entries produce no capture data; non-zero
             * means the control sequence timed out */
            if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
                entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
            }
            break;

        default:
            entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
            break;
        }

        /*  next entry in the template */
        entry = (ql_minidump_entry_t *) ((char *) entry
                + entry->hdr.entry_size);
    }

    if (!sane_start || (sane_end > 1)) {
        device_printf(ha->pci_dev,
            "\n%s: Template configuration error. Check Template\n",
            __func__);
    }

    QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
        __func__, template_hdr->num_of_entries));

    return 0;
}

/*
 * Read CRB operation.
 */

/*
 * Capture op_count CRB registers starting at crb_entry->addr with the
 * given stride.  Output format: (addr, value) pairs.  Returns the byte
 * count written, or 0 on any indirect-register access failure.
 */
static uint32_t
ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
    uint32_t * data_buff)
{
    int loop_cnt;
    int ret;
    uint32_t op_count, addr, stride, value = 0;

    addr = crb_entry->addr;
    op_count = crb_entry->op_count;
    stride = crb_entry->addr_stride;

    for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

        ret = ql_rdwr_indreg32(ha, addr, &value, 1);
        if (ret)
            return (0);

        *data_buff++ = addr;
        *data_buff++ = value;

        addr = addr + stride;
    }

    /*
     * for testing purpose we return amount of data written
     */
    return (op_count * (2 * sizeof(uint32_t)));
}

/*
 * Handle L2 Cache.
 */

/*
 * Dump L2 cache lines: for each of op_count tags, write the tag, issue
 * the control write if requested, optionally poll the control register
 * until poll_mask bits clear (poll_wait ms budget), then read read_cnt
 * words.  Returns bytes written, 0 on register-access failure, or
 * (uint32_t)-1 on poll timeout — the caller checks for -1 explicitly.
 */
static uint32_t
ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
    uint32_t * data_buff)
{
    int i, k;
    int loop_cnt;
    int ret;
    uint32_t read_value;
    uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
    uint32_t tag_value, read_cnt;
    volatile uint8_t cntl_value_r;
    long timeout;
    uint32_t data;

    loop_cnt = cacheEntry->op_count;

    read_addr = cacheEntry->read_addr;
    cntrl_addr = cacheEntry->control_addr;
    cntl_value_w = (uint32_t) cacheEntry->write_value;

    tag_reg_addr = cacheEntry->tag_reg_addr;

    tag_value = cacheEntry->init_tag_value;
    read_cnt = cacheEntry->read_addr_cnt;

    for (i = 0; i < loop_cnt; i++) {

        ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
        if (ret)
            return (0);

        if (cacheEntry->write_value != 0) {

            ret = ql_rdwr_indreg32(ha, cntrl_addr,
                    &cntl_value_w, 0);
            if (ret)
                return (0);
        }

        if (cacheEntry->poll_mask != 0) {

            timeout = cacheEntry->poll_wait;

            ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
            if (ret)
                return (0);

            cntl_value_r = (uint8_t)data;

            while ((cntl_value_r & cacheEntry->poll_mask) != 0) {

                if (timeout) {
                    qla_mdelay(__func__, 1);
                    timeout--;
                } else
                    break;

                ret = ql_rdwr_indreg32(ha, cntrl_addr,
                        &data, 1);
                if (ret)
                    return (0);

                cntl_value_r = (uint8_t)data;
            }

            if (!timeout) {
                /* Report timeout error.
                 * core dump capture failed
                 * Skip remaining entries.
                 * Write buffer out to file
                 * Use driver specific fields in template header
                 * to report this error.
                 */
                return (-1);
            }
        }

        addr = read_addr;
        for (k = 0; k < read_cnt; k++) {

            ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
            if (ret)
                return (0);

            *data_buff++ = read_value;
            addr += cacheEntry->read_addr_stride;
        }

        tag_value += cacheEntry->tag_value_stride;
    }

    return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Handle L1 Cache.
 */

/*
 * Dump L1 cache lines.  Like ql_L2Cache but the control write is
 * unconditional and there is no poll phase.  Returns bytes written or
 * 0 on register-access failure.
 */
static uint32_t
ql_L1Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
    uint32_t *data_buff)
{
    int ret;
    int i, k;
    int loop_cnt;
    uint32_t read_value;
    uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
    uint32_t tag_value, read_cnt;
    uint32_t cntl_value_w;

    loop_cnt = cacheEntry->op_count;

    read_addr = cacheEntry->read_addr;
    cntrl_addr = cacheEntry->control_addr;
    cntl_value_w = (uint32_t) cacheEntry->write_value;

    tag_reg_addr = cacheEntry->tag_reg_addr;

    tag_value = cacheEntry->init_tag_value;
    read_cnt = cacheEntry->read_addr_cnt;

    for (i = 0; i < loop_cnt; i++) {

        ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
        if (ret)
            return (0);

        ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
        if (ret)
            return (0);

        addr = read_addr;
        for (k = 0; k < read_cnt; k++) {

            ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
            if (ret)
                return (0);

            *data_buff++ = read_value;
            addr += cacheEntry->read_addr_stride;
        }

        tag_value += cacheEntry->tag_value_stride;
    }

    return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Reading OCM memory
 */

/*
 * Dump on-chip memory via direct register reads (READ_REG32), op_count
 * words with the entry's stride.  No failure path; returns bytes written.
 */
static uint32_t
ql_rdocm(qla_host_t *ha, ql_minidump_entry_rdocm_t *ocmEntry,
    uint32_t *data_buff)
{
    int i, loop_cnt;
    volatile uint32_t addr;
    volatile uint32_t value;

    addr = ocmEntry->read_addr;
    loop_cnt = ocmEntry->op_count;

    for (i = 0; i < loop_cnt; i++) {
        value = READ_REG32(ha, addr);
        *data_buff++ = value;
        addr += ocmEntry->read_addr_stride;
    }

    return (loop_cnt * sizeof(value));
}

/*
 * Read memory
 */

/*
 * Dump off-chip memory in 16-byte units via ql_rdwr_offchip_mem().
 * Each unit yields four 32-bit words (lo, hi, ulo, uhi).  Returns bytes
 * written or 0 on access failure.
 */
static uint32_t
ql_rdmem(qla_host_t *ha, ql_minidump_entry_rdmem_t *mem_entry,
    uint32_t *data_buff)
{
    int ret;
    int i, loop_cnt;
    volatile uint32_t addr;
    q80_offchip_mem_val_t val;

    addr = mem_entry->read_addr;

    /* size in bytes / 16 */
    loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);

    for (i = 0; i < loop_cnt; i++) {

        ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
        if (ret)
            return (0);

        *data_buff++ = val.data_lo;
        *data_buff++ = val.data_hi;
        *data_buff++ = val.data_ulo;
        *data_buff++ = val.data_uhi;

        addr += (sizeof(uint32_t) * 4);
    }

    return (loop_cnt * (sizeof(uint32_t) * 4));
}

/*
 * Read Rom
 */

/*
 * Dump flash/ROM contents one 32-bit word at a time via ql_rd_flash32().
 * Returns bytes written or 0 on read failure.
 */
static uint32_t
ql_rdrom(qla_host_t *ha, ql_minidump_entry_rdrom_t *romEntry,
    uint32_t *data_buff)
{
    int ret;
    int i, loop_cnt;
    uint32_t addr;
    uint32_t value;

    addr = romEntry->read_addr;
    loop_cnt = romEntry->read_data_size; /* This is size in bytes */
    loop_cnt /= sizeof(value);

    for (i = 0; i < loop_cnt; i++) {

        ret = ql_rd_flash32(ha, addr, &value);
        if (ret)
            return (0);

        *data_buff++ = value;
        addr += sizeof(value);
    }

    return (loop_cnt * sizeof(value));
}

/*
 * Read MUX data
 */

/*
 * Dump multiplexed registers: write a select value, read the muxed
 * register, emit (select, value) pairs, stepping the select each pass.
 * Returns bytes written or 0 on access failure.
 */
static uint32_t
ql_rdmux(qla_host_t *ha, ql_minidump_entry_mux_t *muxEntry,
    uint32_t *data_buff)
{
    int ret;
    int loop_cnt;
    uint32_t read_value, sel_value;
    uint32_t read_addr, select_addr;

    select_addr = muxEntry->select_addr;
    sel_value = muxEntry->select_value;
    read_addr = muxEntry->read_addr;

    for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {

        ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
        if (ret)
            return (0);

        ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
        if (ret)
            return (0);

        *data_buff++ = sel_value;
        *data_buff++ = read_value;

        sel_value += muxEntry->select_value_stride;
    }

    return (loop_cnt * (2 * sizeof(uint32_t)));
}

/*
 * Two-level mux dump: each iteration performs two select/read rounds
 * (select_value_1 then select_value_2, each masked through select_addr_2),
 * emitting (masked-select, value) pairs — four words per iteration.
 * Returns bytes written or 0 on access failure.
 */
static uint32_t
ql_rdmux2(qla_host_t *ha, ql_minidump_entry_mux2_t *muxEntry,
    uint32_t *data_buff)
{
    int ret;
    int loop_cnt;
    uint32_t select_addr_1, select_addr_2;
    uint32_t select_value_1, select_value_2;
    uint32_t select_value_count, select_value_mask;
    uint32_t read_addr, read_value;

    select_addr_1 = muxEntry->select_addr_1;
    select_addr_2 = muxEntry->select_addr_2;
    select_value_1 = muxEntry->select_value_1;
    select_value_2 = muxEntry->select_value_2;
    select_value_count = muxEntry->select_value_count;
    select_value_mask = muxEntry->select_value_mask;

    read_addr = muxEntry->read_addr;

    for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
        loop_cnt++) {

        uint32_t temp_sel_val;

        ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
        if (ret)
            return (0);

        temp_sel_val = select_value_1 & select_value_mask;

        ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
        if (ret)
            return (0);

        ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
        if (ret)
            return (0);

        *data_buff++ = temp_sel_val;
        *data_buff++ = read_value;

        ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
        if (ret)
            return (0);

        temp_sel_val = select_value_2 & select_value_mask;

        ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
        if (ret)
            return (0);

        ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
        if (ret)
            return (0);

        *data_buff++ = temp_sel_val;
        *data_buff++ = read_value;

        select_value_1 += muxEntry->select_value_stride;
        select_value_2 += muxEntry->select_value_stride;
    }

    return (loop_cnt * (4 * sizeof(uint32_t)));
}

/*
 * Handling Queue State Reads.
 */

/*
 * Dump per-queue state: select each queue id in turn, then read
 * read_cnt words from read_addr with read_stride.  Returns bytes
 * written or 0 on access failure.
 */
static uint32_t
ql_rdqueue(qla_host_t *ha, ql_minidump_entry_queue_t *queueEntry,
    uint32_t *data_buff)
{
    int ret;
    int loop_cnt, k;
    uint32_t read_value;
    uint32_t read_addr, read_stride, select_addr;
    uint32_t queue_id, read_cnt;

    read_cnt = queueEntry->read_addr_cnt;
    read_stride = queueEntry->read_addr_stride;
    select_addr = queueEntry->select_addr;

    for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
        loop_cnt++) {

        ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
        if (ret)
            return (0);

        read_addr = queueEntry->read_addr;

        for (k = 0; k < read_cnt; k++) {

            ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
            if (ret)
                return (0);

            *data_buff++ = read_value;
            read_addr += read_stride;
        }

        queue_id += queueEntry->queue_id_stride;
    }

    return (loop_cnt * (read_cnt * sizeof(uint32_t)));
}

/*
 * Handling control entries.
 */

/*
 * Execute a CNTRL template entry: a per-address micro-program whose
 * opcode bits select write / read-write-back / AND / OR / poll /
 * read-state / write-state / modify-state operations, consuming the
 * bits as they are handled.  The saved_state_array in the template
 * header is the scratch register file shared across entries.
 * Returns 0 on success, 0 on register-access failure as well (note:
 * access failures are indistinguishable from success here), and
 * (uint32_t)-1 on poll timeout.
 */
static uint32_t
ql_cntrl(qla_host_t *ha, ql_minidump_template_hdr_t *template_hdr,
    ql_minidump_entry_cntrl_t *crbEntry)
{
    int ret;
    int count;
    uint32_t opcode, read_value, addr, entry_addr;
    long timeout;

    entry_addr = crbEntry->addr;

    for (count = 0; count < crbEntry->op_count; count++) {
        opcode = crbEntry->opcode;

        if (opcode & QL_DBG_OPCODE_WR) {

            ret = ql_rdwr_indreg32(ha, entry_addr,
                    &crbEntry->value_1, 0);
            if (ret)
                return (0);

            opcode &= ~QL_DBG_OPCODE_WR;
        }

        if (opcode & QL_DBG_OPCODE_RW) {

            ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
            if (ret)
                return (0);

            ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
            if (ret)
                return (0);

            opcode &= ~QL_DBG_OPCODE_RW;
        }

        if (opcode & QL_DBG_OPCODE_AND) {

            ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
            if (ret)
                return (0);

            read_value &= crbEntry->value_2;
            opcode &= ~QL_DBG_OPCODE_AND;

            /* fused AND+OR applies both masks in one write-back */
            if (opcode & QL_DBG_OPCODE_OR) {
                read_value |= crbEntry->value_3;
                opcode &= ~QL_DBG_OPCODE_OR;
            }

            ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
            if (ret)
                return (0);
        }

        if (opcode & QL_DBG_OPCODE_OR) {

            ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
            if (ret)
                return (0);

            read_value |= crbEntry->value_3;

            ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
            if (ret)
                return (0);

            opcode &= ~QL_DBG_OPCODE_OR;
        }

        if (opcode & QL_DBG_OPCODE_POLL) {

            opcode &= ~QL_DBG_OPCODE_POLL;
            timeout = crbEntry->poll_timeout;
            addr = entry_addr;

            ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
            if (ret)
                return (0);

            /* wait until masked bits equal value_1, 1 ms/step */
            while ((read_value & crbEntry->value_2)
                != crbEntry->value_1) {

                if (timeout) {
                    qla_mdelay(__func__, 1);
                    timeout--;
                } else
                    break;

                ret = ql_rdwr_indreg32(ha, addr,
                        &read_value, 1);
                if (ret)
                    return (0);
            }

            if (!timeout) {
                /*
                 * Report timeout error.
                 * core dump capture failed
                 * Skip remaining entries.
                 * Write buffer out to file
                 * Use driver specific fields in template header
                 * to report this error.
                 */
                return (-1);
            }
        }

        if (opcode & QL_DBG_OPCODE_RDSTATE) {
            /*
             * decide which address to use.
 */

/*
 * Poll-then-read capture: for each of op_count select values, write the
 * select, poll the select register until any mask bit is set (bounded
 * by entry->poll iterations, no delay between polls), then read one
 * data word.  Output format: (select, data) pairs.  Returns bytes
 * written, or 0 on access failure or poll exhaustion.
 */
static uint32_t
ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
    uint32_t *data_buff)
{
    int ret;
    int loop_cnt;
    uint32_t op_count, select_addr, select_value_stride, select_value;
    uint32_t read_addr, poll, mask, data_size, data;
    uint32_t wait_count = 0;

    select_addr = entry->select_addr;
    read_addr = entry->read_addr;
    select_value = entry->select_value;
    select_value_stride = entry->select_value_stride;
    op_count = entry->op_count;
    poll = entry->poll;
    mask = entry->mask;
    data_size = entry->data_size;

    for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

        ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
        if (ret)
            return (0);

        wait_count = 0;

        while (wait_count < poll) {

            uint32_t temp;

            ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
            if (ret)
                return (0);

            if ( (temp & mask) != 0 ) {
                break;
            }
            wait_count++;
        }

        if (wait_count == poll) {
            device_printf(ha->pci_dev,
                "%s: Error in processing entry\n", __func__);
            device_printf(ha->pci_dev,
                "%s: wait_count <0x%x> poll <0x%x>\n",
                __func__, wait_count, poll);
            return 0;
        }

        ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
        if (ret)
            return (0);

        *data_buff++ = select_value;
        *data_buff++ = data;
        select_value = select_value + select_value_stride;
    }

    /*
     * for testing purpose we return amount of data written
     */
    return (loop_cnt * (2 * sizeof(uint32_t)));
}

/*
 * Handling rd modify write poll entry.
 */

/*
 * Poll / read-modify-write / poll capture: arm with value_1 at addr_1,
 * poll addr_1 for mask bits, then read addr_2, AND it with modify_mask,
 * write it back, re-arm with value_2 and poll again.  On the first poll
 * timeout only a message is logged (the entry is not retried).  Always
 * emits exactly one (addr_2, data) pair's worth of size — returns
 * 2 * sizeof(uint32_t), or 0 on register-access failure.  Note the
 * buffer is only written on the non-timeout path, so on timeout the
 * reported size covers uninitialized output — presumably tolerated by
 * the dump post-processor; verify against the minidump tooling.
 */
static uint32_t
ql_pollrd_modify_write(qla_host_t *ha,
    ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
    uint32_t *data_buff)
{
    int ret;
    uint32_t addr_1, addr_2, value_1, value_2, data;
    uint32_t poll, mask, data_size, modify_mask;
    uint32_t wait_count = 0;

    addr_1 = entry->addr_1;
    addr_2 = entry->addr_2;
    value_1 = entry->value_1;
    value_2 = entry->value_2;

    poll = entry->poll;
    mask = entry->mask;
    modify_mask = entry->modify_mask;
    data_size = entry->data_size;

    ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
    if (ret)
        return (0);

    wait_count = 0;
    while (wait_count < poll) {

        uint32_t temp;

        ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
        if (ret)
            return (0);

        if ( (temp & mask) != 0 ) {
            break;
        }
        wait_count++;
    }

    if (wait_count == poll) {
        device_printf(ha->pci_dev, "%s Error in processing entry\n",
            __func__);
    } else {

        ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
        if (ret)
            return (0);

        data = (data & modify_mask);

        ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
        if (ret)
            return (0);

        ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
        if (ret)
            return (0);

        /* Poll again */
        wait_count = 0;
        while (wait_count < poll) {

            uint32_t temp;

            ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
            if (ret)
                return (0);

            if ( (temp & mask) != 0 ) {
                break;
            }
            wait_count++;
        }
        *data_buff++ = addr_2;
        *data_buff++ = data;
    }

    /*
     * for testing purpose we return amount of data written
     */
    return (2 * sizeof(uint32_t));
}
Index: head/sys/dev/smartpqi/smartpqi_queue.c
===================================================================
--- head/sys/dev/smartpqi/smartpqi_queue.c	(revision 359440)
+++ head/sys/dev/smartpqi/smartpqi_queue.c	(revision 359441)
@@ -1,996 +1,996 @@
/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* $FreeBSD$ */

#include "smartpqi_includes.h"

/*
 * Submit an admin IU to the adapter.
 * Add interrupt support, if required
 */

/*
 * Synchronously submit one general admin request IU on the admin IB
 * queue and busy-wait (COND_WAIT, PQISRC_ADMIN_CMD_RESP_TIMEOUT) for
 * its response on the admin OB queue.  The response is copied into
 * *resp and the OB consumer index is advanced and published to the
 * device.  The request tag is always released before returning.
 * Returns PQI_STATUS_SUCCESS, PQI_STATUS_TIMEOUT, PQI_STATUS_FAILURE
 * (no tag), or the firmware response status.
 */
int
pqisrc_submit_admin_req(pqisrc_softstate_t *softs, gen_adm_req_iu_t *req,
            gen_adm_resp_iu_t *resp)
{
    int ret = PQI_STATUS_SUCCESS;
    ob_queue_t *ob_q = &softs->admin_ob_queue;
    ib_queue_t *ib_q = &softs->admin_ib_queue;
    int tmo = PQISRC_ADMIN_CMD_RESP_TIMEOUT;

    DBG_FUNC("IN\n");

    req->header.iu_type = PQI_IU_TYPE_GENERAL_ADMIN_REQUEST;
    req->header.comp_feature = 0x00;
    req->header.iu_length = PQI_STANDARD_IU_LENGTH;
    req->res1 = 0;
    req->work = 0;

    /* Get the tag */
    req->req_id = pqisrc_get_tag(&softs->taglist);
    if (INVALID_ELEM == req->req_id) {
        DBG_ERR("Tag not available0x%x\n",(uint16_t)req->req_id);
        ret = PQI_STATUS_FAILURE;
        goto err_out;
    }
    softs->rcb[req->req_id].tag = req->req_id;

    /* Submit the command to the admin ib queue */
    ret = pqisrc_submit_cmnd(softs, ib_q, req);
    if (ret != PQI_STATUS_SUCCESS) {
        DBG_ERR("Unable to submit command\n");
        goto err_cmd;
    }

    /* Wait for completion: OB producer index advances past our CI */
    COND_WAIT((*(ob_q->pi_virt_addr) != ob_q->ci_local), tmo);
    if (tmo <= 0) {
        DBG_ERR("Admin cmd timeout\n");
        DBG_ERR("tmo : %d\n",tmo); \
        ret = PQI_STATUS_TIMEOUT;
        goto err_cmd;
    }

    /* Copy the response */
    memcpy(resp, ob_q->array_virt_addr + (ob_q->ci_local * ob_q->elem_size),
        sizeof(gen_adm_resp_iu_t));

    /* Update CI */
    ob_q->ci_local = (ob_q->ci_local + 1 ) % ob_q->num_elem;
    PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
        ob_q->ci_register_offset, LE_32(ob_q->ci_local));

    /* Validate the response data */
    ASSERT(req->fn_code == resp->fn_code);
    ASSERT(resp->header.iu_type == PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE);
    ret = resp->status;
    if (ret)
        goto err_cmd;

    os_reset_rcb(&softs->rcb[req->req_id]);
    pqisrc_put_tag(&softs->taglist,req->req_id);
    DBG_FUNC("OUT\n");
    return ret;

err_cmd:
    os_reset_rcb(&softs->rcb[req->req_id]);
    pqisrc_put_tag(&softs->taglist,req->req_id);
err_out:
    DBG_FUNC("failed OUT : %d\n", ret);
    return ret;
}

/*
 * Get the administration queue config parameters.
 */

/*
 * Read the PQI device admin-queue capability register and decode the
 * element counts (bytes 0/1) and element sizes in 16-byte units
 * (bytes 2/3) for the admin IB and OB queues.
 */
void
pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
{
    uint64_t val = 0;

    val = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP));

    /* pqi_cap = (struct pqi_dev_adminq_cap *)&val;*/
    softs->admin_ib_queue.num_elem = val & 0xFF;
    softs->admin_ob_queue.num_elem = (val & 0xFF00) >> 8;

    /* Note : size in unit of 16 byte s*/
    softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16;
    softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16;

    DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n",
        softs->admin_ib_queue.num_elem);
    DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n",
        softs->admin_ib_queue.elem_size);
}

/*
 * Decide the no of elements in admin ib and ob queues.
 */

/* Clamp device-advertised admin queue depths to the driver's maximums. */
void
pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
{
    /* Determine num elements in Admin IBQ */
    softs->admin_ib_queue.num_elem = MIN(softs->admin_ib_queue.num_elem,
                    PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM);

    /* Determine num elements in Admin OBQ */
    softs->admin_ob_queue.num_elem = MIN(softs->admin_ob_queue.num_elem,
                    PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM);
}

/*
 * Allocate DMA memory for admin queue and initialize.
 */

/*
 * Allocate one DMA region holding, in order: the admin IB element
 * array, the admin OB element array, the IB consumer-index word, and
 * (at the next 64-byte-aligned offset) the OB producer-index word.
 * Carves the region up, records virtual/DMA addresses in the softstate,
 * and asserts the 64-byte alignment the PQI spec requires.
 * Returns PQI_STATUS_SUCCESS or PQI_STATUS_FAILURE.
 */
int
pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
{
    uint32_t ib_array_size = 0;
    uint32_t ob_array_size = 0;
    uint32_t alloc_size = 0;
    char *virt_addr = NULL;
    dma_addr_t dma_addr = 0;
    int ret = PQI_STATUS_SUCCESS;

    ib_array_size = (softs->admin_ib_queue.num_elem *
            softs->admin_ib_queue.elem_size);

    ob_array_size = (softs->admin_ob_queue.num_elem *
            softs->admin_ob_queue.elem_size);

    alloc_size = ib_array_size + ob_array_size +
            2 * sizeof(uint32_t) + PQI_ADDR_ALIGN_MASK_64 + 1; /* for IB CI and OB PI */

    /* Allocate memory for Admin Q */
    softs->admin_queue_dma_mem.tag = "admin_queue";
    softs->admin_queue_dma_mem.size = alloc_size;
    softs->admin_queue_dma_mem.align = PQI_ADMINQ_ELEM_ARRAY_ALIGN;
    ret = os_dma_mem_alloc(softs, &softs->admin_queue_dma_mem);
    if (ret) {
        DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
        goto err_out;
    }

    /* Setup the address */
    virt_addr = softs->admin_queue_dma_mem.virt_addr;
    dma_addr = softs->admin_queue_dma_mem.dma_addr;

    /* IB */
    softs->admin_ib_queue.q_id = 0;
    softs->admin_ib_queue.array_virt_addr = virt_addr;
    softs->admin_ib_queue.array_dma_addr = dma_addr;
    softs->admin_ib_queue.pi_local = 0;

    /* OB */
    softs->admin_ob_queue.q_id = 0;
    softs->admin_ob_queue.array_virt_addr = virt_addr + ib_array_size;
    softs->admin_ob_queue.array_dma_addr = dma_addr + ib_array_size;
    softs->admin_ob_queue.ci_local = 0;

    /* IB CI */
    softs->admin_ib_queue.ci_virt_addr =
        (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
                + ob_array_size);
    softs->admin_ib_queue.ci_dma_addr =
        (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
                ob_array_size);

    /* OB PI */
    softs->admin_ob_queue.pi_virt_addr =
        (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
        PQI_ADDR_ALIGN_MASK_64 + 1);
    softs->admin_ob_queue.pi_dma_addr =
        (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
        PQI_ADDR_ALIGN_MASK_64 + 1);

    DBG_INIT("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
        (void*)softs->admin_ib_queue.ci_dma_addr,
        (void*)softs->admin_ob_queue.pi_dma_addr );

    /* Verify alignment */
    ASSERT(!(softs->admin_ib_queue.array_dma_addr &
                PQI_ADDR_ALIGN_MASK_64));
    ASSERT(!(softs->admin_ib_queue.ci_dma_addr &
                PQI_ADDR_ALIGN_MASK_64));
    ASSERT(!(softs->admin_ob_queue.array_dma_addr &
                PQI_ADDR_ALIGN_MASK_64));
    ASSERT(!(softs->admin_ob_queue.pi_dma_addr &
                PQI_ADDR_ALIGN_MASK_64));

    DBG_FUNC("OUT\n");
    return ret;

err_out:
    DBG_FUNC("failed OUT\n");
    return PQI_STATUS_FAILURE;
}

/*
 * Subroutine used to create (or) delete the admin queue requested.
 */

/*
 * Write a create/delete command to the admin-queue config function
 * register and busy-wait until the function status returns to IDLE.
 * cmd is PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR or the delete variant;
 * each uses its own timeout.  Returns PQI_STATUS_SUCCESS or
 * PQI_STATUS_TIMEOUT.
 */
int
pqisrc_create_delete_adminq(pqisrc_softstate_t *softs, uint32_t cmd)
{
    int tmo = 0;
    int ret = PQI_STATUS_SUCCESS;

    /* Create Admin Q pair writing to Admin Q config function reg */
    PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_q_config,
        PQI_ADMINQ_CONFIG, LE_64(cmd));

    if (cmd == PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR)
        tmo = PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT;
    else
        tmo = PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT;

    /* Wait for completion */
    COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
        PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE),
        tmo);
    if (tmo <= 0) {
        DBG_ERR("Unable to create/delete admin queue pair\n");
        ret = PQI_STATUS_TIMEOUT;
    }

    return ret;
}

/*
 * Debug admin queue configuration params.
 */

/* Dump the admin IB/OB queue layout and register pointers (DBG_INFO only). */
void
pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
{
    DBG_INFO(" softs->admin_ib_queue.array_dma_addr : %p\n",
        (void*)softs->admin_ib_queue.array_dma_addr);
    DBG_INFO(" softs->admin_ib_queue.array_virt_addr : %p\n",
        (void*)softs->admin_ib_queue.array_virt_addr);
    DBG_INFO(" softs->admin_ib_queue.num_elem : %d\n",
        softs->admin_ib_queue.num_elem);
    DBG_INFO(" softs->admin_ib_queue.elem_size : %d\n",
        softs->admin_ib_queue.elem_size);
    DBG_INFO(" softs->admin_ob_queue.array_dma_addr : %p\n",
        (void*)softs->admin_ob_queue.array_dma_addr);
    DBG_INFO(" softs->admin_ob_queue.array_virt_addr : %p\n",
        (void*)softs->admin_ob_queue.array_virt_addr);
    DBG_INFO(" softs->admin_ob_queue.num_elem : %d\n",
        softs->admin_ob_queue.num_elem);
    DBG_INFO(" softs->admin_ob_queue.elem_size : %d\n",
        softs->admin_ob_queue.elem_size);
    DBG_INFO(" softs->admin_ib_queue.pi_register_abs : %p\n",
        (void*)softs->admin_ib_queue.pi_register_abs);
    DBG_INFO(" softs->admin_ob_queue.ci_register_abs : %p\n",
        (void*)softs->admin_ob_queue.ci_register_abs);
}

/*
 * Function used to create an admin queue.
 */

/*
 * Full admin-queue bring-up (PQI spec, pqi2-r00a table 24 / table 36):
 * read and clamp the capability config, allocate the DMA region,
 * program the element-array / CI / PI addresses and queue parameters
 * into the PQI registers, issue the create-queue-pair command, read
 * back the device-assigned PI/CI doorbell offsets, and create the IB
 * queue lock.  On failure the DMA region is freed.
 * (The diff hunk below is the r359441 fix removing a doubled
 * semicolon; the surrounding lines are context.)
 */
int
pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
{
-	int ret = PQI_STATUS_SUCCESS;;
+	int ret = PQI_STATUS_SUCCESS;
    uint32_t admin_q_param = 0;

    DBG_FUNC("IN\n");

    /* Get admin queue details  - pqi2-r00a - table 24 */
    pqisrc_get_admin_queue_config(softs);

    /* Decide admin Q config */
    pqisrc_decide_admin_queue_config(softs);

    /* Allocate and init Admin Q pair */
    ret = pqisrc_allocate_and_init_adminq(softs);
    if (ret) {
        DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
        goto err_out;
    }

    /* Write IB Q element array address */
    PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
        PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR,
        LE_64(softs->admin_ib_queue.array_dma_addr));

    /* Write OB Q element array address */
    PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
        PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR,
        LE_64(softs->admin_ob_queue.array_dma_addr));

    /* Write IB Q CI address */
    PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
        PQI_ADMIN_IBQ_CI_ADDR,
        LE_64(softs->admin_ib_queue.ci_dma_addr));

    /* Write OB Q PI address */
    PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
        PQI_ADMIN_OBQ_PI_ADDR,
        LE_64(softs->admin_ob_queue.pi_dma_addr));

    /* Write Admin Q params pqi-r200a table 36 */
    admin_q_param = softs->admin_ib_queue.num_elem |
            (softs->admin_ob_queue.num_elem << 8)|
            PQI_ADMIN_QUEUE_MSIX_DISABLE;

    PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
        PQI_ADMINQ_PARAM, LE_32(admin_q_param));

    /* Submit cmd to create Admin Q pair */
    ret = pqisrc_create_delete_adminq(softs,
        PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR);
    if (ret) {
        DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret);
        goto err_q_create;
    }

    /* Admin queue created, get ci,pi offset */
    softs->admin_ib_queue.pi_register_offset = (PQISRC_PQI_REG_OFFSET +
        PCI_MEM_GET64(softs, &softs->pqi_reg->admin_ibq_pi_offset,
            PQI_ADMIN_IBQ_PI_OFFSET));

    softs->admin_ib_queue.pi_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
        softs->admin_ib_queue.pi_register_offset);

    softs->admin_ob_queue.ci_register_offset = (PQISRC_PQI_REG_OFFSET +
        PCI_MEM_GET64(softs, &softs->pqi_reg->admin_obq_ci_offset,
            PQI_ADMIN_OBQ_CI_OFFSET));

    softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
        softs->admin_ob_queue.ci_register_offset);

    os_strlcpy(softs->admin_ib_queue.lockname, "admin_ibqlock", LOCKNAME_SIZE);

    ret = OS_INIT_PQILOCK(softs, &softs->admin_ib_queue.lock,
            softs->admin_ib_queue.lockname);
    if(ret){
        DBG_ERR("Admin spinlock initialization failed\n");
        softs->admin_ib_queue.lockcreated = false;
        goto err_lock;
    }
    softs->admin_ib_queue.lockcreated = true;

    /* Print admin q config details */
    pqisrc_print_adminq_config(softs);

    DBG_FUNC("OUT\n");
    return ret;

err_lock:
err_q_create:
    os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
err_out:
    DBG_FUNC("failed OUT\n");
    return ret;
}

/*
 * Subroutine used to delete an operational queue.
 */

/*
 * Delete an operational IB or OB queue by id.  The admin-command path
 * is compiled out (#if 0) because firmware does not support it yet;
 * currently always returns PQI_STATUS_SUCCESS.
 */
int
pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
        uint32_t q_id, boolean_t ibq)
{
    int ret = PQI_STATUS_SUCCESS;
/* Firmware doesn't support this now */

#if 0
    gen_adm_req_iu_t admin_req;
    gen_adm_resp_iu_t admin_resp;

    memset(&admin_req, 0, sizeof(admin_req));
    memset(&admin_resp, 0, sizeof(admin_resp));

    DBG_FUNC("IN\n");

    admin_req.req_type.create_op_iq.qid = q_id;

    if (ibq)
        admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_IQ;
    else
        admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_OQ;

    ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);

    DBG_FUNC("OUT\n");
#endif
    return ret;
}

/*
 * Function used to destroy the event queue.
 */

/*
 * Tear down the event queue (delete via pqisrc_delete_op_queue when it
 * was created, logging failure) and free its DMA memory unconditionally.
 */
void
pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
{
    DBG_FUNC("IN\n");

    if (softs->event_q.created == true) {
        int ret = PQI_STATUS_SUCCESS;
        ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false);
        if (ret) {
            DBG_ERR("Failed to Delete Event Q %d\n",
                softs->event_q.q_id);
        }
        softs->event_q.created = false;
    }

    /* Free the memory */
    os_dma_mem_free(softs, &softs->event_q_dma_mem);

    DBG_FUNC("OUT\n");
}

/*
 * Function used to destroy operational ib queues.
*/ void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; ib_queue_t *op_ib_q = NULL; int i; DBG_FUNC("IN\n"); for (i = 0; i < softs->num_op_raid_ibq; i++) { /* OP RAID IB Q */ op_ib_q = &softs->op_raid_ib_q[i]; if (op_ib_q->created == true) { ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true); if (ret) { DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id); } op_ib_q->created = false; } if(op_ib_q->lockcreated==true){ OS_UNINIT_PQILOCK(&op_ib_q->lock); op_ib_q->lockcreated = false; } /* OP AIO IB Q */ op_ib_q = &softs->op_aio_ib_q[i]; if (op_ib_q->created == true) { ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true); if (ret) { DBG_ERR("Failed to Delete AIO IB Q %d\n",op_ib_q->q_id); } op_ib_q->created = false; } if(op_ib_q->lockcreated==true){ OS_UNINIT_PQILOCK(&op_ib_q->lock); op_ib_q->lockcreated = false; } } /* Free the memory */ os_dma_mem_free(softs, &softs->op_ibq_dma_mem); DBG_FUNC("OUT\n"); } /* * Function used to destroy operational ob queues. */ void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; int i; DBG_FUNC("IN\n"); for (i = 0; i < softs->num_op_obq; i++) { ob_queue_t *op_ob_q = NULL; op_ob_q = &softs->op_ob_q[i]; if (op_ob_q->created == true) { ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false); if (ret) { DBG_ERR("Failed to Delete OB Q %d\n",op_ob_q->q_id); } op_ob_q->created = false; } } /* Free the memory */ os_dma_mem_free(softs, &softs->op_obq_dma_mem); DBG_FUNC("OUT\n"); } /* * Function used to destroy an admin queue. */ int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); #if 0 ret = pqisrc_create_delete_adminq(softs, PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR); #endif os_dma_mem_free(softs, &softs->admin_queue_dma_mem); DBG_FUNC("OUT\n"); return ret; } /* * Function used to change operational ib queue properties. 
*/ int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs, ib_queue_t *op_ib_q, uint32_t prop) { - int ret = PQI_STATUS_SUCCESS;; + int ret = PQI_STATUS_SUCCESS; gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); DBG_FUNC("IN\n"); admin_req.fn_code = PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP; admin_req.req_type.change_op_iq_prop.qid = op_ib_q->q_id; admin_req.req_type.change_op_iq_prop.vend_specific = prop; ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); DBG_FUNC("OUT\n"); return ret; } /* * Function used to create an operational ob queue. */ int pqisrc_create_op_obq(pqisrc_softstate_t *softs, ob_queue_t *op_ob_q) { - int ret = PQI_STATUS_SUCCESS;; + int ret = PQI_STATUS_SUCCESS; gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; DBG_FUNC("IN\n"); memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_OQ; admin_req.req_type.create_op_oq.qid = op_ob_q->q_id; admin_req.req_type.create_op_oq.intr_msg_num = op_ob_q->intr_msg_num; admin_req.req_type.create_op_oq.elem_arr_addr = op_ob_q->array_dma_addr; admin_req.req_type.create_op_oq.ob_pi_addr = op_ob_q->pi_dma_addr; admin_req.req_type.create_op_oq.num_elem = op_ob_q->num_elem; admin_req.req_type.create_op_oq.elem_len = op_ob_q->elem_size / 16; DBG_INFO("admin_req.req_type.create_op_oq.qid : %x\n",admin_req.req_type.create_op_oq.qid); DBG_INFO("admin_req.req_type.create_op_oq.intr_msg_num : %x\n", admin_req.req_type.create_op_oq.intr_msg_num ); ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); if( PQI_STATUS_SUCCESS == ret) { op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET + admin_resp.resp_type.create_op_oq.ci_offset); op_ob_q->ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr + op_ob_q->ci_register_offset); } else { int i = 0; DBG_WARN("Error Status Descriptors\n"); for(i = 0; i < 4;i++) 
DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]); } DBG_FUNC("OUT ret : %d\n", ret); return ret; } /* * Function used to create an operational ib queue. */ int pqisrc_create_op_ibq(pqisrc_softstate_t *softs, ib_queue_t *op_ib_q) { - int ret = PQI_STATUS_SUCCESS;; + int ret = PQI_STATUS_SUCCESS; gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; DBG_FUNC("IN\n"); memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_IQ; admin_req.req_type.create_op_iq.qid = op_ib_q->q_id; admin_req.req_type.create_op_iq.elem_arr_addr = op_ib_q->array_dma_addr; admin_req.req_type.create_op_iq.iq_ci_addr = op_ib_q->ci_dma_addr; admin_req.req_type.create_op_iq.num_elem = op_ib_q->num_elem; admin_req.req_type.create_op_iq.elem_len = op_ib_q->elem_size / 16; ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); if( PQI_STATUS_SUCCESS == ret) { op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET + admin_resp.resp_type.create_op_iq.pi_offset); op_ib_q->pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr + op_ib_q->pi_register_offset); } else { int i = 0; DBG_WARN("Error Status Decsriptors\n"); for(i = 0; i < 4;i++) DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]); } DBG_FUNC("OUT ret : %d\n", ret); return ret; } /* * subroutine used to create an operational ib queue for AIO. */ int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs, ib_queue_t *op_aio_ib_q) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); ret = pqisrc_create_op_ibq(softs,op_aio_ib_q); if ( PQI_STATUS_SUCCESS == ret) ret = pqisrc_change_op_ibq_queue_prop(softs, op_aio_ib_q, PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO); DBG_FUNC("OUT ret : %d\n", ret); return ret; } /* * subroutine used to create an operational ib queue for RAID. 
*/ int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs, ib_queue_t *op_raid_ib_q) { - int ret = PQI_STATUS_SUCCESS;; + int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); ret = pqisrc_create_op_ibq(softs,op_raid_ib_q); DBG_FUNC("OUT\n"); return ret; } /* * Allocate and create an event queue to process supported events. */ int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t alloc_size = 0; uint32_t num_elem; char *virt_addr = NULL; dma_addr_t dma_addr = 0; uint32_t event_q_pi_dma_start_offset = 0; uint32_t event_q_pi_virt_start_offset = 0; char *event_q_pi_virt_start_addr = NULL; ob_queue_t *event_q = NULL; DBG_FUNC("IN\n"); /* * Calculate memory requirements. * If event queue is shared for IO response, number of * elements in event queue depends on num elements in OP OB Q * also. Since event queue element size (32) is more than IO * response size , event queue element size need not be checked * for queue size calculation. */ #ifdef SHARE_EVENT_QUEUE_FOR_IO num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM); #else num_elem = PQISRC_NUM_EVENT_Q_ELEM; #endif alloc_size = num_elem * PQISRC_EVENT_Q_ELEM_SIZE; event_q_pi_dma_start_offset = alloc_size; event_q_pi_virt_start_offset = alloc_size; alloc_size += sizeof(uint32_t); /*For IBQ CI*/ /* Allocate memory for event queues */ softs->event_q_dma_mem.tag = "event_queue"; softs->event_q_dma_mem.size = alloc_size; softs->event_q_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN; ret = os_dma_mem_alloc(softs, &softs->event_q_dma_mem); if (ret) { DBG_ERR("Failed to Allocate Event Q ret : %d\n" , ret); goto err_out; } /* Set up the address */ virt_addr = softs->event_q_dma_mem.virt_addr; dma_addr = softs->event_q_dma_mem.dma_addr; event_q_pi_dma_start_offset += dma_addr; event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset; event_q = &softs->event_q; ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr); 
event_q->q_id = PQI_OP_EVENT_QUEUE_ID; event_q->num_elem = num_elem; event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE; event_q->pi_dma_addr = event_q_pi_dma_start_offset; event_q->pi_virt_addr = (uint32_t *)event_q_pi_virt_start_addr; event_q->intr_msg_num = 0; /* vector zero for event */ ASSERT(!(event_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4)); ret = pqisrc_create_op_obq(softs,event_q); if (ret) { DBG_ERR("Failed to Create EventQ %d\n",event_q->q_id); goto err_out_create; } event_q->created = true; DBG_FUNC("OUT\n"); return ret; err_out_create: pqisrc_destroy_event_queue(softs); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * Allocate DMA memory and create operational ib queues. */ int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t alloc_size = 0; char *virt_addr = NULL; dma_addr_t dma_addr = 0; uint32_t ibq_size = 0; uint32_t ib_ci_dma_start_offset = 0; char *ib_ci_virt_start_addr = NULL; uint32_t ib_ci_virt_start_offset = 0; uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID; ib_queue_t *op_ib_q = NULL; uint32_t num_op_ibq = softs->num_op_raid_ibq + softs->num_op_aio_ibq; int i = 0; DBG_FUNC("IN\n"); /* Calculate memory requirements */ ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size; alloc_size = num_op_ibq * ibq_size; /* CI indexes starts after Queue element array */ ib_ci_dma_start_offset = alloc_size; ib_ci_virt_start_offset = alloc_size; alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/ /* Allocate memory for IB queues */ softs->op_ibq_dma_mem.tag = "op_ib_queue"; softs->op_ibq_dma_mem.size = alloc_size; softs->op_ibq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN; ret = os_dma_mem_alloc(softs, &softs->op_ibq_dma_mem); if (ret) { DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n", ret); goto err_out; } /* Set up the address */ virt_addr = softs->op_ibq_dma_mem.virt_addr; dma_addr = softs->op_ibq_dma_mem.dma_addr; ib_ci_dma_start_offset += dma_addr; 
ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset; ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq); for (i = 0; i < softs->num_op_raid_ibq; i++) { /* OP RAID IB Q */ op_ib_q = &softs->op_raid_ib_q[i]; ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr); op_ib_q->q_id = ibq_id++; snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i); ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname); if(ret){ DBG_ERR("raid_ibqlock %d init failed\n", i); op_ib_q->lockcreated = false; goto err_lock; } op_ib_q->lockcreated = true; op_ib_q->num_elem = softs->num_elem_per_op_ibq; op_ib_q->elem_size = softs->ibq_elem_size; op_ib_q->ci_dma_addr = ib_ci_dma_start_offset + (2 * i * sizeof(uint32_t)); op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr + (2 * i * sizeof(uint32_t))); ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4)); ret = pqisrc_create_op_raid_ibq(softs, op_ib_q); if (ret) { DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n", __func__, op_ib_q->q_id); goto err_out_create; } op_ib_q->created = true; /* OP AIO IB Q */ virt_addr += ibq_size; dma_addr += ibq_size; op_ib_q = &softs->op_aio_ib_q[i]; ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr); op_ib_q->q_id = ibq_id++; snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i); ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname); if(ret){ DBG_ERR("aio_ibqlock %d init failed\n", i); op_ib_q->lockcreated = false; goto err_lock; } op_ib_q->lockcreated = true; op_ib_q->num_elem = softs->num_elem_per_op_ibq; op_ib_q->elem_size = softs->ibq_elem_size; op_ib_q->ci_dma_addr = ib_ci_dma_start_offset + (((2 * i) + 1) * sizeof(uint32_t)); op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr + (((2 * i) + 1) * sizeof(uint32_t))); ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4)); ret = pqisrc_create_op_aio_ibq(softs, op_ib_q); if (ret) { DBG_ERR("Failed to Create OP AIO 
IBQ %d\n",op_ib_q->q_id); goto err_out_create; } op_ib_q->created = true; virt_addr += ibq_size; dma_addr += ibq_size; } DBG_FUNC("OUT\n"); return ret; err_lock: err_out_create: pqisrc_destroy_op_ib_queues(softs); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * Allocate DMA memory and create operational ob queues. */ int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t alloc_size = 0; char *virt_addr = NULL; dma_addr_t dma_addr = 0; uint32_t obq_size = 0; uint32_t ob_pi_dma_start_offset = 0; uint32_t ob_pi_virt_start_offset = 0; char *ob_pi_virt_start_addr = NULL; uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID; ob_queue_t *op_ob_q = NULL; uint32_t num_op_obq = softs->num_op_obq; int i = 0; DBG_FUNC("IN\n"); /* * OB Q element array should be 64 byte aligned. * So the number of elements in OB Q should be multiple * of 4, so that OB Queue element size (16) * num elements * will be multiple of 64. */ ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4); obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size; alloc_size += num_op_obq * obq_size; /* PI indexes starts after Queue element array */ ob_pi_dma_start_offset = alloc_size; ob_pi_virt_start_offset = alloc_size; alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/ /* Allocate memory for OB queues */ softs->op_obq_dma_mem.tag = "op_ob_queue"; softs->op_obq_dma_mem.size = alloc_size; softs->op_obq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN; ret = os_dma_mem_alloc(softs, &softs->op_obq_dma_mem); if (ret) { DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n", ret); goto err_out; } /* Set up the address */ virt_addr = softs->op_obq_dma_mem.virt_addr; dma_addr = softs->op_obq_dma_mem.dma_addr; ob_pi_dma_start_offset += dma_addr; ob_pi_virt_start_addr = virt_addr + ob_pi_virt_start_offset; DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq); for (i = 0; i < softs->num_op_obq; i++) { op_ob_q = &softs->op_ob_q[i]; ASSERT(!(dma_addr 
& PQI_ADDR_ALIGN_MASK_64)); FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr); op_ob_q->q_id = obq_id++; if(softs->share_opq_and_eventq == true) op_ob_q->intr_msg_num = i; else op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */ op_ob_q->num_elem = softs->num_elem_per_op_obq; op_ob_q->elem_size = softs->obq_elem_size; op_ob_q->pi_dma_addr = ob_pi_dma_start_offset + (i * sizeof(uint32_t)); op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr + (i * sizeof(uint32_t))); ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4)); ret = pqisrc_create_op_obq(softs,op_ob_q); if (ret) { DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id); goto err_out_create; } op_ob_q->created = true; virt_addr += obq_size; dma_addr += obq_size; } DBG_FUNC("OUT\n"); return ret; err_out_create: pqisrc_destroy_op_ob_queues(softs); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * Function used to create operational queues for the adapter. */ int pqisrc_create_op_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); /* Create Operational IB queues */ ret = pqisrc_alloc_and_create_ib_queues(softs); if (ret) goto err_out; /* Create Operational OB queues */ ret = pqisrc_alloc_and_create_ob_queues(softs); if (ret) goto err_out_obq; /* Create Event queue */ ret = pqisrc_alloc_and_create_event_queue(softs); if (ret) goto err_out_eventq; DBG_FUNC("OUT\n"); return ret; err_out_eventq: pqisrc_destroy_op_ob_queues(softs); err_out_obq: pqisrc_destroy_op_ib_queues(softs); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } Index: head/sys/dev/smartpqi/smartpqi_request.c =================================================================== --- head/sys/dev/smartpqi/smartpqi_request.c (revision 359440) +++ head/sys/dev/smartpqi/smartpqi_request.c (revision 359441) @@ -1,794 +1,794 @@ /*- * Copyright (c) 2018 Microsemi Corporation. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #include "smartpqi_includes.h" #define SG_FLAG_LAST 0x40000000 #define SG_FLAG_CHAIN 0x80000000 /* Subroutine to find out embedded sgl count in IU */ static inline uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted) { uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU; DBG_FUNC(" IN "); /** calculate embedded sgl count using num_elem_alloted for IO **/ if(elem_alloted - 1) embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU); DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count); DBG_FUNC(" OUT "); return embedded_sgl_count; } /* Subroutine to find out contiguous free elem in IU */ static inline uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q) { uint32_t contiguous_free_elem = 0; DBG_FUNC(" IN "); if(pi >= ci) { contiguous_free_elem = (elem_in_q - pi); if(ci == 0) contiguous_free_elem -= 1; } else { contiguous_free_elem = (ci - pi - 1); } DBG_FUNC(" OUT "); return contiguous_free_elem; } /* Subroutine to find out num of elements need for the request */ static uint32_t pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count) { uint32_t num_sg; uint32_t num_elem_required = 1; DBG_FUNC(" IN "); DBG_IO("SGL_Count :%d",SG_Count); /******** If SG_Count greater than max sg per IU i.e 4 or 68 (4 is with out spanning or 68 is with spanning) chaining is required. OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then, on these two cases one element is enough. 
********/ if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU) return num_elem_required; /* SGL Count Other Than First IU */ num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU; num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU); DBG_FUNC(" OUT "); return num_elem_required; } /* Subroutine to build SG list for the IU submission*/ static boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr, uint32_t num_elem_alloted) { uint32_t i; uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb); sgt_t *sgt = sg_array; sgt_t *sg_chain = NULL; boolean_t partial = false; DBG_FUNC(" IN "); DBG_IO("SGL_Count :%d",num_sg); if (0 == num_sg) { goto out; } if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) { for (i = 0; i < num_sg; i++, sgt++) { sgt->addr= OS_GET_IO_SG_ADDR(rcb,i); sgt->len= OS_GET_IO_SG_LEN(rcb,i); sgt->flags= 0; } sg_array[num_sg - 1].flags = SG_FLAG_LAST; } else { /** SGL Chaining **/ sg_chain = rcb->sg_chain_virt; sgt->addr = rcb->sg_chain_dma; sgt->len = num_sg * sizeof(sgt_t); sgt->flags = SG_FLAG_CHAIN; sgt = sg_chain; for (i = 0; i < num_sg; i++, sgt++) { sgt->addr = OS_GET_IO_SG_ADDR(rcb,i); sgt->len = OS_GET_IO_SG_LEN(rcb,i); sgt->flags = 0; } sg_chain[num_sg - 1].flags = SG_FLAG_LAST; num_sg = 1; partial = true; } out: iu_hdr->iu_length = num_sg * sizeof(sgt_t); DBG_FUNC(" OUT "); return partial; } /*Subroutine used to Build the RAID request */ static void pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb, pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted) { DBG_FUNC(" IN "); raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; raid_req->header.comp_feature = 0; raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); raid_req->work_area[0] = 0; raid_req->work_area[1] = 0; raid_req->request_id = rcb->tag; raid_req->nexus_id = 0; raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb); memcpy(raid_req->lun_number, rcb->dvp->scsi3addr, sizeof(raid_req->lun_number)); 
raid_req->protocol_spec = 0; raid_req->data_direction = rcb->data_dir; raid_req->reserved1 = 0; raid_req->fence = 0; raid_req->error_index = raid_req->request_id; raid_req->reserved2 = 0; raid_req->task_attribute = OS_GET_TASK_ATTR(rcb); raid_req->command_priority = 0; raid_req->reserved3 = 0; raid_req->reserved4 = 0; raid_req->reserved5 = 0; /* As cdb and additional_cdb_bytes are contiguous, update them in a single statement */ memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen); #if 0 DBG_IO("CDB :"); for(i = 0; i < rcb->cmdlen ; i++) DBG_IO(" 0x%x \n ",raid_req->cdb[i]); #endif switch (rcb->cmdlen) { case 6: case 10: case 12: case 16: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0; break; case 20: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_4; break; case 24: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_8; break; case 28: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_12; break; case 32: default: /* todo:review again */ raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_16; break; } /* Frame SGL Descriptor */ raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb, &raid_req->header, num_elem_alloted); raid_req->header.iu_length += offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t); #if 0 DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type); DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id); DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id); DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length); DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute); DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number); DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index); DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr); DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len); DBG_IO("raid_req->sg_descriptors[0].flags 
: 0%x", raid_req->sg_descriptors[0].flags); #endif rcb->success_cmp_callback = pqisrc_process_io_response_success; rcb->error_cmp_callback = pqisrc_process_raid_response_error; rcb->resp_qid = raid_req->response_queue_id; DBG_FUNC(" OUT "); } /*Subroutine used to Build the AIO request */ static void pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) { DBG_FUNC(" IN "); aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST; aio_req->header.comp_feature = 0; aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); aio_req->work_area[0] = 0; aio_req->work_area[1] = 0; aio_req->req_id = rcb->tag; aio_req->res1[0] = 0; aio_req->res1[1] = 0; aio_req->nexus = rcb->ioaccel_handle; aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); aio_req->data_dir = rcb->data_dir; aio_req->mem_type = 0; aio_req->fence = 0; aio_req->res2 = 0; aio_req->task_attr = OS_GET_TASK_ATTR(rcb); aio_req->cmd_prio = 0; aio_req->res3 = 0; aio_req->err_idx = aio_req->req_id; aio_req->cdb_len = rcb->cmdlen; if(rcb->cmdlen > sizeof(aio_req->cdb)) rcb->cmdlen = sizeof(aio_req->cdb); memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); #if 0 DBG_IO("CDB : \n"); for(int i = 0; i < rcb->cmdlen ; i++) DBG_IO(" 0x%x \n",aio_req->cdb[i]); #endif memset(aio_req->lun,0,sizeof(aio_req->lun)); memset(aio_req->res4,0,sizeof(aio_req->res4)); if(rcb->encrypt_enable == true) { aio_req->encrypt_enable = true; aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index); aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower); aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper); } else { aio_req->encrypt_enable = 0; aio_req->encrypt_key_index = 0; aio_req->encrypt_twk_high = 0; aio_req->encrypt_twk_low = 0; } /* Frame SGL Descriptor */ aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, &aio_req->header, num_elem_alloted); aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t); DBG_INFO("aio_req->num_sg 
:%d",aio_req->num_sg); aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) - sizeof(iu_header_t); #if 0 DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type); DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid); DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id); DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus); DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len); DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir); DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr); DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx); DBG_IO("aio_req->num_sg :%d",aio_req->num_sg); DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr); DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len); DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags); #endif rcb->success_cmp_callback = pqisrc_process_io_response_success; rcb->error_cmp_callback = pqisrc_process_aio_response_error; rcb->resp_qid = aio_req->response_queue_id; DBG_FUNC(" OUT "); } /*Function used to build and send RAID/AIO */ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb) { ib_queue_t *ib_q_array = softs->op_aio_ib_q; ib_queue_t *ib_q = NULL; char *ib_iu = NULL; IO_PATH_T io_path = AIO_PATH; uint32_t TraverseCount = 0; int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb); int qindex = first_qindex; uint32_t num_op_ib_q = softs->num_op_aio_ibq; uint32_t num_elem_needed; uint32_t num_elem_alloted = 0; pqi_scsi_dev_t *devp = rcb->dvp; uint8_t raidbypass_cdb[16]; DBG_FUNC(" IN "); rcb->cdbp = OS_GET_CDBP(rcb); if(IS_AIO_PATH(devp)) { /** IO for Physical Drive **/ /** Send in AIO PATH**/ rcb->ioaccel_handle = devp->ioaccel_handle; } else { int ret = PQI_STATUS_FAILURE; /** IO for RAID Volume **/ if (devp->offload_enabled) { /** ByPass IO ,Send in AIO PATH **/ ret = pqisrc_send_scsi_cmd_raidbypass(softs, devp, rcb, raidbypass_cdb); } if (PQI_STATUS_FAILURE == ret) { /** Send in RAID PATH **/ io_path = RAID_PATH; num_op_ib_q 
= softs->num_op_raid_ibq; ib_q_array = softs->op_raid_ib_q; } else { rcb->cdbp = raidbypass_cdb; } } num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb)); DBG_IO("num_elem_needed :%d",num_elem_needed); do { uint32_t num_elem_available; ib_q = (ib_q_array + qindex); PQI_LOCK(&ib_q->lock); num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local, *(ib_q->ci_virt_addr), ib_q->num_elem); DBG_IO("num_elem_avialable :%d\n",num_elem_available); if(num_elem_available >= num_elem_needed) { num_elem_alloted = num_elem_needed; break; } DBG_IO("Current queue is busy! Hop to next queue\n"); PQI_UNLOCK(&ib_q->lock); qindex = (qindex + 1) % num_op_ib_q; if(qindex == first_qindex) { if (num_elem_needed == 1) break; TraverseCount += 1; num_elem_needed = 1; } }while(TraverseCount < 2); DBG_IO("num_elem_alloted :%d",num_elem_alloted); if (num_elem_alloted == 0) { DBG_WARN("OUT: IB Queues were full\n"); return PQI_STATUS_QFULL; } /* Get IB Queue Slot address to build IU */ ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size); if(io_path == AIO_PATH) { /** Build AIO structure **/ pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu, num_elem_alloted); } else { /** Build RAID structure **/ pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu, num_elem_alloted); } rcb->req_pending = true; /* Update the local PI */ ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem; DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local); DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr)); /* Inform the fw about the new IU */ PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); PQI_UNLOCK(&ib_q->lock); DBG_FUNC(" OUT "); return PQI_STATUS_SUCCESS; } /* Subroutine used to set encryption info as part of RAID bypass IO*/ static inline void pqisrc_set_enc_info( struct pqi_enc_info *enc_info, struct raid_map *raid_map, uint64_t first_block) { uint32_t volume_blk_size; /* * Set the encryption tweak values 
based on logical block address. * If the block size is 512, the tweak value is equal to the LBA. * For other block sizes, tweak value is (LBA * block size) / 512. */ volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size); if (volume_blk_size != 512) first_block = (first_block * volume_blk_size) / 512; enc_info->data_enc_key_index = GET_LE16((uint8_t *)&raid_map->data_encryption_key_index); enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16)); enc_info->encrypt_tweak_lower = ((uint32_t)(first_block)); } /* * Attempt to perform offload RAID mapping for a logical volume I/O. */ #define HPSA_RAID_0 0 #define HPSA_RAID_4 1 #define HPSA_RAID_1 2 /* also used for RAID 10 */ #define HPSA_RAID_5 3 /* also used for RAID 50 */ #define HPSA_RAID_51 4 #define HPSA_RAID_6 5 /* also used for RAID 60 */ #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ #define HPSA_RAID_MAX HPSA_RAID_ADM #define HPSA_RAID_UNKNOWN 0xff /* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/ int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk, uint32_t *blk_cnt) { switch (cdb[0]) { case SCMD_WRITE_6: *is_write = true; case SCMD_READ_6: *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3]); *blk_cnt = (uint32_t)cdb[4]; if (*blk_cnt == 0) *blk_cnt = 256; break; case SCMD_WRITE_10: *is_write = true; case SCMD_READ_10: *fst_blk = (uint64_t)GET_BE32(&cdb[2]); *blk_cnt = (uint32_t)GET_BE16(&cdb[7]); break; case SCMD_WRITE_12: *is_write = true; case SCMD_READ_12: *fst_blk = (uint64_t)GET_BE32(&cdb[2]); *blk_cnt = GET_BE32(&cdb[6]); break; case SCMD_WRITE_16: *is_write = true; case SCMD_READ_16: *fst_blk = GET_BE64(&cdb[2]); *blk_cnt = GET_BE32(&cdb[10]); break; default: /* Process via normal I/O path. 
*/ return PQI_STATUS_FAILURE; } return PQI_STATUS_SUCCESS; } /* * Function used to build and send RAID bypass request to the adapter */ int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb) { struct raid_map *raid_map; boolean_t is_write = false; uint32_t map_idx; uint64_t fst_blk, lst_blk; uint32_t blk_cnt, blks_per_row; uint64_t fst_row, lst_row; uint32_t fst_row_offset, lst_row_offset; uint32_t fst_col, lst_col; uint32_t r5or6_blks_per_row; uint64_t r5or6_fst_row, r5or6_lst_row; uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset; uint32_t r5or6_fst_col, r5or6_lst_col; uint16_t data_disks_per_row, total_disks_per_row; uint16_t layout_map_count; uint32_t stripesz; uint16_t strip_sz; uint32_t fst_grp, lst_grp, cur_grp; uint32_t map_row; uint64_t disk_block; uint32_t disk_blk_cnt; uint8_t cdb_length; int offload_to_mirror; int i; DBG_FUNC(" IN \n"); DBG_IO("!!!!!\n"); /* Check for eligible opcode, get LBA and block count. */ memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen); for(i = 0; i < rcb->cmdlen ; i++) DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]); if(check_for_scsi_opcode(cdb, &is_write, &fst_blk, &blk_cnt) == PQI_STATUS_FAILURE) return PQI_STATUS_FAILURE; /* Check for write to non-RAID-0. */ if (is_write && device->raid_level != SA_RAID_0) - return PQI_STATUS_FAILURE;; + return PQI_STATUS_FAILURE; if(blk_cnt == 0) return PQI_STATUS_FAILURE; lst_blk = fst_blk + blk_cnt - 1; raid_map = device->raid_map; /* Check for invalid block or wraparound. */ if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) || lst_blk < fst_blk) return PQI_STATUS_FAILURE; data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row); strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size)); layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count)); /* Calculate stripe information for the request. */ blks_per_row = data_disks_per_row * strip_sz; if (!blks_per_row) return PQI_STATUS_FAILURE; /* use __udivdi3 ? 
*/ fst_row = fst_blk / blks_per_row; lst_row = lst_blk / blks_per_row; fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row)); lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row)); fst_col = fst_row_offset / strip_sz; lst_col = lst_row_offset / strip_sz; /* If this isn't a single row/column then give to the controller. */ if (fst_row != lst_row || fst_col != lst_col) return PQI_STATUS_FAILURE; /* Proceeding with driver mapping. */ total_disks_per_row = data_disks_per_row + GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row)); map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) % GET_LE16((uint8_t *)(&raid_map->row_cnt)); map_idx = (map_row * total_disks_per_row) + fst_col; /* RAID 1 */ if (device->raid_level == SA_RAID_1) { if (device->offload_to_mirror) map_idx += data_disks_per_row; device->offload_to_mirror = !device->offload_to_mirror; } else if (device->raid_level == SA_RAID_ADM) { /* RAID ADM */ /* * Handles N-way mirrors (R1-ADM) and R10 with # of drives * divisible by 3. */ offload_to_mirror = device->offload_to_mirror; if (offload_to_mirror == 0) { /* use physical disk in the first mirrored group. */ map_idx %= data_disks_per_row; } else { do { /* * Determine mirror group that map_idx * indicates. */ cur_grp = map_idx / data_disks_per_row; if (offload_to_mirror != cur_grp) { if (cur_grp < layout_map_count - 1) { /* * Select raid index from * next group. */ map_idx += data_disks_per_row; cur_grp++; } else { /* * Select raid index from first * group. */ map_idx %= data_disks_per_row; cur_grp = 0; } } } while (offload_to_mirror != cur_grp); } /* Set mirror group to use next time. */ offload_to_mirror = (offload_to_mirror >= layout_map_count - 1) ? 
0 : offload_to_mirror + 1; if(offload_to_mirror >= layout_map_count) return PQI_STATUS_FAILURE; device->offload_to_mirror = offload_to_mirror; /* * Avoid direct use of device->offload_to_mirror within this * function since multiple threads might simultaneously * increment it beyond the range of device->layout_map_count -1. */ } else if ((device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_6) && layout_map_count > 1) { /* RAID 50/60 */ /* Verify first and last block are in same RAID group */ r5or6_blks_per_row = strip_sz * data_disks_per_row; stripesz = r5or6_blks_per_row * layout_map_count; fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row; lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row; if (fst_grp != lst_grp) return PQI_STATUS_FAILURE; /* Verify request is in a single row of RAID 5/6 */ fst_row = r5or6_fst_row = fst_blk / stripesz; r5or6_lst_row = lst_blk / stripesz; if (r5or6_fst_row != r5or6_lst_row) return PQI_STATUS_FAILURE; /* Verify request is in a single column */ fst_row_offset = r5or6_fst_row_offset = (uint32_t)((fst_blk % stripesz) % r5or6_blks_per_row); r5or6_lst_row_offset = (uint32_t)((lst_blk % stripesz) % r5or6_blks_per_row); fst_col = r5or6_fst_row_offset / strip_sz; r5or6_fst_col = fst_col; r5or6_lst_col = r5or6_lst_row_offset / strip_sz; if (r5or6_fst_col != r5or6_lst_col) return PQI_STATUS_FAILURE; /* Request is eligible */ map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) % GET_LE16((uint8_t *)(&raid_map->row_cnt)); map_idx = (fst_grp * (GET_LE16((uint8_t *)(&raid_map->row_cnt)) * total_disks_per_row)) + (map_row * total_disks_per_row) + fst_col; } if (map_idx >= RAID_MAP_MAX_ENTRIES) return PQI_STATUS_FAILURE; rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle; disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) + fst_row * strip_sz + (fst_row_offset - fst_col * strip_sz); disk_blk_cnt = blk_cnt; /* Handle differing logical/physical block sizes. 
*/ if (raid_map->phys_blk_shift) { disk_block <<= raid_map->phys_blk_shift; disk_blk_cnt <<= raid_map->phys_blk_shift; } if (disk_blk_cnt > 0xffff) return PQI_STATUS_FAILURE; /* Build the new CDB for the physical disk I/O. */ if (disk_block > 0xffffffff) { cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16; cdb[1] = 0; PUT_BE64(disk_block, &cdb[2]); PUT_BE32(disk_blk_cnt, &cdb[10]); cdb[14] = 0; cdb[15] = 0; cdb_length = 16; } else { cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10; cdb[1] = 0; PUT_BE32(disk_block, &cdb[2]); cdb[6] = 0; PUT_BE16(disk_blk_cnt, &cdb[7]); cdb[9] = 0; cdb_length = 10; } if (GET_LE16((uint8_t *)(&raid_map->flags)) & RAID_MAP_ENCRYPTION_ENABLED) { pqisrc_set_enc_info(&rcb->enc_info, raid_map, fst_blk); rcb->encrypt_enable = true; } else { rcb->encrypt_enable = false; } rcb->cmdlen = cdb_length; DBG_FUNC("OUT"); return PQI_STATUS_SUCCESS; } /* Function used to submit a TMF to the adater */ int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, rcb_t *rcb, int req_id, int tmf_type) { int rval = PQI_STATUS_SUCCESS; pqi_tmf_req_t tmf_req; memset(&tmf_req, 0, sizeof(pqi_tmf_req_t)); DBG_FUNC("IN"); tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t); tmf_req.req_id = rcb->tag; memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); tmf_req.tmf = tmf_type; tmf_req.req_id_to_manage = req_id; tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb); tmf_req.obq_id_to_manage = rcb->resp_qid; rcb->req_pending = true; rval = pqisrc_submit_cmnd(softs, &softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command rval=%d\n", rval); return rval; } rval = pqisrc_wait_on_condition(softs, rcb); if (rval != PQI_STATUS_SUCCESS){ DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type); rcb->status = REQUEST_FAILED; } if (rcb->status != REQUEST_SUCCESS) { DBG_ERR_BTL(devp, "Task Management failed 
tmf_type:%d " "stat:0x%x\n", tmf_type, rcb->status); rval = PQI_STATUS_FAILURE; } DBG_FUNC("OUT"); return rval; } Index: head/sys/dev/vnic/thunder_bgx.c =================================================================== --- head/sys/dev/vnic/thunder_bgx.c (revision 359440) +++ head/sys/dev/vnic/thunder_bgx.c (revision 359441) @@ -1,1145 +1,1145 @@ /* * Copyright (C) 2015 Cavium Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ * */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_bgx.h" #include "thunder_bgx_var.h" #include "nic_reg.h" #include "nic.h" #include "lmac_if.h" #define THUNDER_BGX_DEVSTR "ThunderX BGX Ethernet I/O Interface" MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory"); #define BGX_NODE_ID_MASK 0x1 #define BGX_NODE_ID_SHIFT 24 #define DRV_NAME "thunder-BGX" #define DRV_VERSION "1.0" static int bgx_init_phy(struct bgx *); static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; static int lmac_count __unused; /* Total no of LMACs in system */ static int bgx_xaui_check_link(struct lmac *lmac); static void bgx_get_qlm_mode(struct bgx *); static void bgx_init_hw(struct bgx *); static int bgx_lmac_enable(struct bgx *, uint8_t); static void bgx_lmac_disable(struct bgx *, uint8_t); static int thunder_bgx_probe(device_t); static int thunder_bgx_attach(device_t); static int thunder_bgx_detach(device_t); static device_method_t thunder_bgx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_bgx_probe), DEVMETHOD(device_attach, thunder_bgx_attach), DEVMETHOD(device_detach, thunder_bgx_detach), DEVMETHOD_END, }; static driver_t thunder_bgx_driver = { "bgx", thunder_bgx_methods, sizeof(struct lmac), }; static devclass_t thunder_bgx_devclass; DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0); MODULE_VERSION(thunder_bgx, 1); MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1); MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1); MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1); static int thunder_bgx_probe(device_t dev) { uint16_t vendor_id; uint16_t device_id; vendor_id = pci_get_vendor(dev); device_id = pci_get_device(dev); if (vendor_id == PCI_VENDOR_ID_CAVIUM && device_id == 
PCI_DEVICE_ID_THUNDER_BGX) { device_set_desc(dev, THUNDER_BGX_DEVSTR); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int thunder_bgx_attach(device_t dev) { struct bgx *bgx; uint8_t lmacid; int err; int rid; struct lmac *lmac; bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO)); bgx->dev = dev; lmac = device_get_softc(dev); lmac->bgx = bgx; /* Enable bus mastering */ pci_enable_busmaster(dev); /* Allocate resources - configuration registers */ rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM); bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (bgx->reg_base == NULL) { device_printf(dev, "Could not allocate CSR memory space\n"); err = ENXIO; goto err_disable_device; } bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) & BGX_NODE_ID_MASK; bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX; bgx_vnic[bgx->bgx_id] = bgx; bgx_get_qlm_mode(bgx); err = bgx_init_phy(bgx); if (err != 0) goto err_free_res; bgx_init_hw(bgx); /* Enable all LMACs */ for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) { err = bgx_lmac_enable(bgx, lmacid); if (err) { device_printf(dev, "BGX%d failed to enable lmac%d\n", bgx->bgx_id, lmacid); goto err_free_res; } } return (0); err_free_res: bgx_vnic[bgx->bgx_id] = NULL; bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(bgx->reg_base), bgx->reg_base); err_disable_device: free(bgx, M_BGX); pci_disable_busmaster(dev); return (err); } static int thunder_bgx_detach(device_t dev) { struct lmac *lmac; struct bgx *bgx; uint8_t lmacid; lmac = device_get_softc(dev); bgx = lmac->bgx; /* Disable all LMACs */ for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) bgx_lmac_disable(bgx, lmacid); bgx_vnic[bgx->bgx_id] = NULL; bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(bgx->reg_base), bgx->reg_base); free(bgx, M_BGX); pci_disable_busmaster(dev); return (0); } /* Register read/write APIs */ static uint64_t bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset) { bus_space_handle_t 
addr; addr = ((uint32_t)lmac << 20) + offset; return (bus_read_8(bgx->reg_base, addr)); } static void bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val) { bus_space_handle_t addr; addr = ((uint32_t)lmac << 20) + offset; bus_write_8(bgx->reg_base, addr, val); } static void bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val) { bus_space_handle_t addr; addr = ((uint32_t)lmac << 20) + offset; bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr)); } static int bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask, boolean_t zero) { int timeout = 10; uint64_t reg_val; while (timeout) { reg_val = bgx_reg_read(bgx, lmac, reg); if (zero && !(reg_val & mask)) return (0); if (!zero && (reg_val & mask)) return (0); DELAY(100); timeout--; } return (ETIMEDOUT); } /* Return number of BGX present in HW */ u_int bgx_get_map(int node) { int i; u_int map = 0; for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) map |= (1 << i); } return (map); } /* Return number of LMAC configured for this BGX */ int bgx_get_lmac_count(int node, int bgx_idx) { struct bgx *bgx; bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx != NULL) return (bgx->lmac_count); return (0); } /* Returns the current link status of LMAC */ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) { struct bgx_link_status *link = (struct bgx_link_status *)status; struct bgx *bgx; struct lmac *lmac; bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx == NULL) return; lmac = &bgx->lmac[lmacid]; link->link_up = lmac->link_up; link->duplex = lmac->last_duplex; link->speed = lmac->last_speed; } const uint8_t *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) { struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx != NULL) return (bgx->lmac[lmacid].mac); return (NULL); } void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t 
*mac) { struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx == NULL) return; memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN); } static void bgx_sgmii_change_link_state(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; uint64_t cmr_cfg; uint64_t port_cfg = 0; uint64_t misc_ctl = 0; cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); cmr_cfg &= ~CMR_EN; bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); if (lmac->link_up) { misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; port_cfg &= ~GMI_PORT_CFG_DUPLEX; port_cfg |= (lmac->last_duplex << 2); } else { misc_ctl |= PCS_MISC_CTL_GMX_ENO; } switch (lmac->last_speed) { case 10: port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; misc_ctl |= 50; /* samp_pt */ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); break; case 100: port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; misc_ctl |= 5; /* samp_pt */ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); break; case 1000: port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; misc_ctl |= 1; /* samp_pt */ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512); if (lmac->last_duplex) bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); else bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 8192); break; default: break; } bgx_reg_write(bgx, 
lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); /* renable lmac */ cmr_cfg |= CMR_EN; bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); } static void bgx_lmac_handler(void *arg) { struct lmac *lmac; int link, duplex, speed; int link_changed = 0; int err; lmac = (struct lmac *)arg; err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid, &link, &duplex, &speed); if (err != 0) goto out; if (!link && lmac->last_link) link_changed = -1; if (link && (lmac->last_duplex != duplex || lmac->last_link != link || lmac->last_speed != speed)) { link_changed = 1; } lmac->last_link = link; lmac->last_speed = speed; lmac->last_duplex = duplex; if (!link_changed) goto out; if (link_changed > 0) lmac->link_up = true; else lmac->link_up = false; if (lmac->is_sgmii) bgx_sgmii_change_link_state(lmac); else bgx_xaui_check_link(lmac); out: callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac); } uint64_t bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx == NULL) return (0); if (idx > 8) lmac = (0); return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8))); } uint64_t bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx == NULL) return (0); return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8))); } static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) { uint64_t offset; while (bgx->lmac[lmac].dmac > 0) { offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) + (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t)); bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0); bgx->lmac[lmac].dmac--; } } void bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac) { uint64_t offset; struct bgx *bgx; #ifdef BGX_IN_PROMISCUOUS_MODE return; #endif 
bgx_idx += node * MAX_BGX_PER_CN88XX; bgx = bgx_vnic[bgx_idx]; if (bgx == NULL) { printf("BGX%d not yet initialized, ignoring DMAC addition\n", bgx_idx); return; } dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */ if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) { device_printf(bgx->dev, "Max DMAC filters for LMAC%d reached, ignoring\n", lmac); return; } if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE) bgx->lmac[lmac].dmac = 1; offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) + (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t)); bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac); bgx->lmac[lmac].dmac++; bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL, (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) | (BCAST_ACCEPT << 0)); } /* Configure BGX LMAC in internal loopback mode */ void bgx_lmac_internal_loopback(int node, int bgx_idx, int lmac_idx, boolean_t enable) { struct bgx *bgx; struct lmac *lmac; uint64_t cfg; bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; if (bgx == NULL) return; lmac = &bgx->lmac[lmac_idx]; if (lmac->is_sgmii) { cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL); if (enable) cfg |= PCS_MRX_CTL_LOOPBACK1; else cfg &= ~PCS_MRX_CTL_LOOPBACK1; bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg); } else { cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1); if (enable) cfg |= SPU_CTL_LOOPBACK; else cfg &= ~SPU_CTL_LOOPBACK; bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg); } } static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) { uint64_t cfg; bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); /* max packet size */ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); /* Disable frame alignment if using preamble */ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); if (cfg & 1) bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); /* Enable lmac */ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); /* PCS reset */ bgx_reg_modify(bgx, lmacid, 
BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET, TRUE) != 0) { device_printf(bgx->dev, "BGX PCS reset not completed\n"); return (ENXIO); } /* power down, reset autoneg, autoneg enable */ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); cfg &= ~PCS_MRX_CTL_PWR_DN; cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, PCS_MRX_STATUS_AN_CPT, FALSE) != 0) { device_printf(bgx->dev, "BGX AN_CPT not completed\n"); return (ENXIO); } return (0); } static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) { uint64_t cfg; /* Reset SPU */ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, TRUE) != 0) { device_printf(bgx->dev, "BGX SPU reset not completed\n"); return (ENXIO); } /* Disable LMAC */ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); cfg &= ~CMR_EN; bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); /* Set interleaved running disparity for RXAUI */ if (bgx->lmac_type != BGX_MODE_RXAUI) { bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); } else { bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); } /* clear all interrupts */ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); if (bgx->use_training) { bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); /* training enable */ bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, 
SPU_PMD_CRTL_TRAIN_EN); } /* Append FCS to each packet */ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); /* Disable forward error correction */ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); cfg &= ~SPU_FEC_CTL_FEC_EN; bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); /* Disable autoneg */ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); if (bgx->lmac_type == BGX_MODE_10G_KR) cfg |= (1 << 23); else if (bgx->lmac_type == BGX_MODE_40G_KR) cfg |= (1 << 24); else cfg &= ~((1 << 23) | (1 << 24)); cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12))); bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); /* Enable lmac */ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); cfg &= ~SPU_CTL_LOW_POWER; bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); cfg &= ~SMU_TX_CTL_UNI_EN; cfg |= SMU_TX_CTL_DIC_EN; bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); /* take lmac_count into account */ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); /* max packet size */ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); return (0); } static int bgx_xaui_check_link(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; int lmacid = lmac->lmacid; int lmac_type = bgx->lmac_type; uint64_t cfg; bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); if (bgx->use_training) { cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); if ((cfg & (1UL << 13)) == 0) { cfg = (1UL << 13) | (1UL << 14); bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); cfg |= (1UL << 0); bgx_reg_write(bgx, 
lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); return (ENXIO); } } /* wait for PCS to come out of reset */ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, TRUE) != 0) { device_printf(bgx->dev, "BGX SPU reset not completed\n"); return (ENXIO); } if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, SPU_BR_STATUS_BLK_LOCK, FALSE)) { device_printf(bgx->dev, "SPU_BR_STATUS_BLK_LOCK not completed\n"); return (ENXIO); } } else { if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) { device_printf(bgx->dev, "SPU_BX_STATUS_RX_ALIGN not completed\n"); return (ENXIO); } } /* Clear rcvflt bit (latching high) and read it back */ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { device_printf(bgx->dev, "Receive fault, retry training\n"); if (bgx->use_training) { cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); if ((cfg & (1UL << 13)) == 0) { cfg = (1UL << 13) | (1UL << 14); bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); cfg |= (1UL << 0); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); return (ENXIO); } } return (ENXIO); } /* Wait for MAC RX to be ready */ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL, SMU_RX_CTL_STATUS, TRUE) != 0) { device_printf(bgx->dev, "SMU RX link not okay\n"); return (ENXIO); } /* Wait for BGX RX to be idle */ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, FALSE) != 0) { device_printf(bgx->dev, "SMU RX not idle\n"); return (ENXIO); } /* Wait for BGX TX to be idle */ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, FALSE) != 0) { device_printf(bgx->dev, "SMU TX not idle\n"); return (ENXIO); } if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) != 0) { device_printf(bgx->dev, "Receive fault\n"); 
return (ENXIO); } /* Receive link is latching low. Force it high and verify it */ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK, FALSE) != 0) { device_printf(bgx->dev, "SPU receive link down\n"); return (ENXIO); } cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); cfg &= ~SPU_MISC_CTL_RX_DIS; bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); return (0); } static void bgx_poll_for_link(void *arg) { struct lmac *lmac; uint64_t link; lmac = (struct lmac *)arg; /* Receive link is latching low. Force it high and verify it */ bgx_reg_modify(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK, false); link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); if (link & SPU_STATUS1_RCV_LNK) { lmac->link_up = 1; if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) lmac->last_speed = 40000; else lmac->last_speed = 10000; lmac->last_duplex = 1; } else { lmac->link_up = 0; } if (lmac->last_link != lmac->link_up) { lmac->last_link = lmac->link_up; if (lmac->link_up) bgx_xaui_check_link(lmac); } callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac); } static int bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid) { uint64_t __unused dmac_bcast = (1UL << 48) - 1; struct lmac *lmac; uint64_t cfg; lmac = &bgx->lmac[lmacid]; lmac->bgx = bgx; if (bgx->lmac_type == BGX_MODE_SGMII) { lmac->is_sgmii = 1; if (bgx_lmac_sgmii_init(bgx, lmacid) != 0) return -1; } else { lmac->is_sgmii = 0; if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) return -1; } if (lmac->is_sgmii) { cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); } else { cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); cfg |= ((1UL << 2) | (1UL << 1)); /* 
FCS and PAD */ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); } /* Enable lmac */ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); /* Restore default cfg, incase low level firmware changed it */ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); /* Add broadcast MAC into all LMAC's DMAC filters */ bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid); if ((bgx->lmac_type != BGX_MODE_XFI) && (bgx->lmac_type != BGX_MODE_XAUI) && (bgx->lmac_type != BGX_MODE_XLAUI) && (bgx->lmac_type != BGX_MODE_40G_KR) && (bgx->lmac_type != BGX_MODE_10G_KR)) { if (lmac->phy_if_dev == NULL) { device_printf(bgx->dev, "LMAC%d missing interface to PHY\n", lmacid); return (ENXIO); } if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr, lmacid) != 0) { device_printf(bgx->dev, "LMAC%d could not connect to PHY\n", lmacid); return (ENXIO); } mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF); callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0); mtx_lock(&lmac->check_link_mtx); bgx_lmac_handler(lmac); mtx_unlock(&lmac->check_link_mtx); } else { mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF); callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0); mtx_lock(&lmac->check_link_mtx); bgx_poll_for_link(lmac); mtx_unlock(&lmac->check_link_mtx); } return (0); } static void bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid) { struct lmac *lmac; uint64_t cmrx_cfg; lmac = &bgx->lmac[lmacid]; /* Stop callout */ callout_drain(&lmac->check_link); mtx_destroy(&lmac->check_link_mtx); cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); cmrx_cfg &= ~(1 << 15); bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); bgx_flush_dmac_addrs(bgx, lmacid); if ((bgx->lmac_type != BGX_MODE_XFI) && (bgx->lmac_type != BGX_MODE_XLAUI) && (bgx->lmac_type != BGX_MODE_40G_KR) && (bgx->lmac_type != BGX_MODE_10G_KR)) { if (lmac->phy_if_dev == NULL) { device_printf(bgx->dev, "LMAC%d 
missing interface to PHY\n", lmacid); return; } if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr, lmacid) != 0) { device_printf(bgx->dev, "LMAC%d could not disconnect PHY\n", lmacid); return; } lmac->phy_if_dev = NULL; } } static void bgx_set_num_ports(struct bgx *bgx) { uint64_t lmac_count; switch (bgx->qlm_mode) { case QLM_MODE_SGMII: bgx->lmac_count = 4; bgx->lmac_type = BGX_MODE_SGMII; bgx->lane_to_sds = 0; break; case QLM_MODE_XAUI_1X4: bgx->lmac_count = 1; bgx->lmac_type = BGX_MODE_XAUI; bgx->lane_to_sds = 0xE4; break; case QLM_MODE_RXAUI_2X2: bgx->lmac_count = 2; bgx->lmac_type = BGX_MODE_RXAUI; bgx->lane_to_sds = 0xE4; break; case QLM_MODE_XFI_4X1: bgx->lmac_count = 4; bgx->lmac_type = BGX_MODE_XFI; bgx->lane_to_sds = 0; break; case QLM_MODE_XLAUI_1X4: bgx->lmac_count = 1; bgx->lmac_type = BGX_MODE_XLAUI; bgx->lane_to_sds = 0xE4; break; case QLM_MODE_10G_KR_4X1: bgx->lmac_count = 4; bgx->lmac_type = BGX_MODE_10G_KR; bgx->lane_to_sds = 0; bgx->use_training = 1; break; case QLM_MODE_40G_KR4_1X4: bgx->lmac_count = 1; bgx->lmac_type = BGX_MODE_40G_KR; bgx->lane_to_sds = 0xE4; bgx->use_training = 1; break; default: bgx->lmac_count = 0; break; } /* * Check if low level firmware has programmed LMAC count * based on board type, if yes consider that otherwise * the default static values */ lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; if (lmac_count != 4) bgx->lmac_count = lmac_count; } static void bgx_init_hw(struct bgx *bgx) { int i; bgx_set_num_ports(bgx); bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id); /* Set lmac type and lane2serdes mapping */ for (i = 0; i < bgx->lmac_count; i++) { if (bgx->lmac_type == BGX_MODE_RXAUI) { if (i) bgx->lane_to_sds = 0x0e; else bgx->lane_to_sds = 0x04; bgx_reg_write(bgx, i, BGX_CMRX_CFG, (bgx->lmac_type << 8) | bgx->lane_to_sds); continue; } bgx_reg_write(bgx, i, BGX_CMRX_CFG, 
(bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); bgx->lmac[i].lmacid_bd = lmac_count; lmac_count++; } bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count); bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count); /* Set the backpressure AND mask */ for (i = 0; i < bgx->lmac_count; i++) { bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) << (i * MAX_BGX_CHANS_PER_LMAC)); } /* Disable all MAC filtering */ for (i = 0; i < RX_DMAC_COUNT; i++) bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); /* Disable MAC steering (NCSI traffic) */ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); } static void bgx_get_qlm_mode(struct bgx *bgx) { - device_t dev = bgx->dev;; + device_t dev = bgx->dev; int lmac_type; int train_en; /* Read LMAC0 type to figure out QLM mode * This is configured by low level firmware */ lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); lmac_type = (lmac_type >> 8) & 0x07; train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & SPU_PMD_CRTL_TRAIN_EN; switch (lmac_type) { case BGX_MODE_SGMII: bgx->qlm_mode = QLM_MODE_SGMII; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); } break; case BGX_MODE_XAUI: bgx->qlm_mode = QLM_MODE_XAUI_1X4; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); } break; case BGX_MODE_RXAUI: bgx->qlm_mode = QLM_MODE_RXAUI_2X2; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); } break; case BGX_MODE_XFI: if (!train_en) { bgx->qlm_mode = QLM_MODE_XFI_4X1; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); } } else { bgx->qlm_mode = QLM_MODE_10G_KR_4X1; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); } } break; case BGX_MODE_XLAUI: if (!train_en) { bgx->qlm_mode = QLM_MODE_XLAUI_1X4; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); } } else { bgx->qlm_mode = 
QLM_MODE_40G_KR4_1X4; if (bootverbose) { device_printf(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); } } break; default: bgx->qlm_mode = QLM_MODE_SGMII; if (bootverbose) { device_printf(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); } } } static int bgx_init_phy(struct bgx *bgx) { int err; /* By default we fail */ err = ENXIO; #ifdef FDT err = bgx_fdt_init_phy(bgx); #endif #ifdef ACPI if (err != 0) { /* ARM64TODO: Add ACPI function here */ } #endif return (err); } Index: head/sys/net/if_me.c =================================================================== --- head/sys/net/if_me.c (revision 359440) +++ head/sys/net/if_me.c (revision 359441) @@ -1,665 +1,665 @@ /*- * Copyright (c) 2014, 2018 Andrey V. Elsukov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MEMTU (1500 - sizeof(struct mobhdr)) static const char mename[] = "me"; static MALLOC_DEFINE(M_IFME, mename, "Minimal Encapsulation for IP"); /* Minimal forwarding header RFC 2004 */ struct mobhdr { uint8_t mob_proto; /* protocol */ uint8_t mob_flags; /* flags */ #define MOB_FLAGS_SP 0x80 /* source present */ uint16_t mob_csum; /* header checksum */ struct in_addr mob_dst; /* original destination address */ struct in_addr mob_src; /* original source addr (optional) */ } __packed; struct me_softc { struct ifnet *me_ifp; u_int me_fibnum; struct in_addr me_src; struct in_addr me_dst; CK_LIST_ENTRY(me_softc) chain; CK_LIST_ENTRY(me_softc) srchash; }; CK_LIST_HEAD(me_list, me_softc); #define ME2IFP(sc) ((sc)->me_ifp) #define ME_READY(sc) ((sc)->me_src.s_addr != 0) #define ME_RLOCK_TRACKER struct epoch_tracker me_et #define ME_RLOCK() epoch_enter_preempt(net_epoch_preempt, &me_et) #define ME_RUNLOCK() epoch_exit_preempt(net_epoch_preempt, &me_et) #define ME_WAIT() epoch_wait_preempt(net_epoch_preempt) #ifndef ME_HASH_SIZE #define ME_HASH_SIZE (1 << 4) #endif VNET_DEFINE_STATIC(struct me_list *, me_hashtbl) = NULL; VNET_DEFINE_STATIC(struct me_list *, me_srchashtbl) = NULL; #define V_me_hashtbl VNET(me_hashtbl) #define V_me_srchashtbl VNET(me_srchashtbl) #define ME_HASH(src, dst) (V_me_hashtbl[\ me_hashval((src), (dst)) & (ME_HASH_SIZE - 1)]) #define ME_SRCHASH(src) (V_me_srchashtbl[\ fnv_32_buf(&(src), sizeof(src), FNV1_32_INIT) & (ME_HASH_SIZE - 1)]) static struct sx me_ioctl_sx; SX_SYSINIT(me_ioctl_sx, &me_ioctl_sx, "me_ioctl"); static int me_clone_create(struct if_clone *, int, caddr_t); static void me_clone_destroy(struct ifnet *); 
VNET_DEFINE_STATIC(struct if_clone *, me_cloner); #define V_me_cloner VNET(me_cloner) static void me_qflush(struct ifnet *); static int me_transmit(struct ifnet *, struct mbuf *); static int me_ioctl(struct ifnet *, u_long, caddr_t); static int me_output(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); static int me_input(struct mbuf *, int, int, void *); static int me_set_tunnel(struct me_softc *, in_addr_t, in_addr_t); static void me_delete_tunnel(struct me_softc *); SYSCTL_DECL(_net_link); static SYSCTL_NODE(_net_link, IFT_TUNNEL, me, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Minimal Encapsulation for IP (RFC 2004)"); #ifndef MAX_ME_NEST #define MAX_ME_NEST 1 #endif VNET_DEFINE_STATIC(int, max_me_nesting) = MAX_ME_NEST; #define V_max_me_nesting VNET(max_me_nesting) SYSCTL_INT(_net_link_me, OID_AUTO, max_nesting, CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(max_me_nesting), 0, "Max nested tunnels"); static uint32_t me_hashval(in_addr_t src, in_addr_t dst) { uint32_t ret; ret = fnv_32_buf(&src, sizeof(src), FNV1_32_INIT); return (fnv_32_buf(&dst, sizeof(dst), ret)); } static struct me_list * me_hashinit(void) { struct me_list *hash; int i; hash = malloc(sizeof(struct me_list) * ME_HASH_SIZE, M_IFME, M_WAITOK); for (i = 0; i < ME_HASH_SIZE; i++) CK_LIST_INIT(&hash[i]); return (hash); } static void vnet_me_init(const void *unused __unused) { V_me_cloner = if_clone_simple(mename, me_clone_create, me_clone_destroy, 0); } VNET_SYSINIT(vnet_me_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, vnet_me_init, NULL); static void vnet_me_uninit(const void *unused __unused) { if (V_me_hashtbl != NULL) { free(V_me_hashtbl, M_IFME); V_me_hashtbl = NULL; ME_WAIT(); free(V_me_srchashtbl, M_IFME); } if_clone_detach(V_me_cloner); } VNET_SYSUNINIT(vnet_me_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, vnet_me_uninit, NULL); static int me_clone_create(struct if_clone *ifc, int unit, caddr_t params) { struct me_softc *sc; sc = malloc(sizeof(struct me_softc), M_IFME, 
M_WAITOK | M_ZERO); sc->me_fibnum = curthread->td_proc->p_fibnum; ME2IFP(sc) = if_alloc(IFT_TUNNEL); ME2IFP(sc)->if_softc = sc; if_initname(ME2IFP(sc), mename, unit); - ME2IFP(sc)->if_mtu = MEMTU;; + ME2IFP(sc)->if_mtu = MEMTU; ME2IFP(sc)->if_flags = IFF_POINTOPOINT|IFF_MULTICAST; ME2IFP(sc)->if_output = me_output; ME2IFP(sc)->if_ioctl = me_ioctl; ME2IFP(sc)->if_transmit = me_transmit; ME2IFP(sc)->if_qflush = me_qflush; ME2IFP(sc)->if_capabilities |= IFCAP_LINKSTATE; ME2IFP(sc)->if_capenable |= IFCAP_LINKSTATE; if_attach(ME2IFP(sc)); bpfattach(ME2IFP(sc), DLT_NULL, sizeof(u_int32_t)); return (0); } static void me_clone_destroy(struct ifnet *ifp) { struct me_softc *sc; sx_xlock(&me_ioctl_sx); sc = ifp->if_softc; me_delete_tunnel(sc); bpfdetach(ifp); if_detach(ifp); ifp->if_softc = NULL; sx_xunlock(&me_ioctl_sx); ME_WAIT(); if_free(ifp); free(sc, M_IFME); } static int me_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct sockaddr_in *src, *dst; struct me_softc *sc; int error; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < 576) return (EINVAL); ifp->if_mtu = ifr->ifr_mtu; return (0); case SIOCSIFADDR: ifp->if_flags |= IFF_UP; case SIOCSIFFLAGS: case SIOCADDMULTI: case SIOCDELMULTI: return (0); } sx_xlock(&me_ioctl_sx); sc = ifp->if_softc; if (sc == NULL) { error = ENXIO; goto end; } error = 0; switch (cmd) { case SIOCSIFPHYADDR: src = &((struct in_aliasreq *)data)->ifra_addr; dst = &((struct in_aliasreq *)data)->ifra_dstaddr; if (src->sin_family != dst->sin_family || src->sin_family != AF_INET || src->sin_len != dst->sin_len || src->sin_len != sizeof(struct sockaddr_in)) { error = EINVAL; break; } if (src->sin_addr.s_addr == INADDR_ANY || dst->sin_addr.s_addr == INADDR_ANY) { error = EADDRNOTAVAIL; break; } error = me_set_tunnel(sc, src->sin_addr.s_addr, dst->sin_addr.s_addr); break; case SIOCDIFPHYADDR: me_delete_tunnel(sc); break; case SIOCGIFPSRCADDR: case SIOCGIFPDSTADDR: if (!ME_READY(sc)) { error = 
EADDRNOTAVAIL; break; } src = (struct sockaddr_in *)&ifr->ifr_addr; memset(src, 0, sizeof(*src)); src->sin_family = AF_INET; src->sin_len = sizeof(*src); switch (cmd) { case SIOCGIFPSRCADDR: src->sin_addr = sc->me_src; break; case SIOCGIFPDSTADDR: src->sin_addr = sc->me_dst; break; } error = prison_if(curthread->td_ucred, sintosa(src)); if (error != 0) memset(src, 0, sizeof(*src)); break; case SIOCGTUNFIB: ifr->ifr_fib = sc->me_fibnum; break; case SIOCSTUNFIB: if ((error = priv_check(curthread, PRIV_NET_GRE)) != 0) break; if (ifr->ifr_fib >= rt_numfibs) error = EINVAL; else sc->me_fibnum = ifr->ifr_fib; break; default: error = EINVAL; break; } end: sx_xunlock(&me_ioctl_sx); return (error); } static int me_lookup(const struct mbuf *m, int off, int proto, void **arg) { const struct ip *ip; struct me_softc *sc; if (V_me_hashtbl == NULL) return (0); NET_EPOCH_ASSERT(); ip = mtod(m, const struct ip *); CK_LIST_FOREACH(sc, &ME_HASH(ip->ip_dst.s_addr, ip->ip_src.s_addr), chain) { if (sc->me_src.s_addr == ip->ip_dst.s_addr && sc->me_dst.s_addr == ip->ip_src.s_addr) { if ((ME2IFP(sc)->if_flags & IFF_UP) == 0) return (0); *arg = sc; return (ENCAP_DRV_LOOKUP); } } return (0); } /* * Check that ingress address belongs to local host. */ static void me_set_running(struct me_softc *sc) { if (in_localip(sc->me_src)) ME2IFP(sc)->if_drv_flags |= IFF_DRV_RUNNING; else ME2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING; } /* * ifaddr_event handler. * Clear IFF_DRV_RUNNING flag when ingress address disappears to prevent * source address spoofing. 
*/ static void me_srcaddr(void *arg __unused, const struct sockaddr *sa, int event __unused) { const struct sockaddr_in *sin; struct me_softc *sc; /* Check that VNET is ready */ if (V_me_hashtbl == NULL) return; NET_EPOCH_ASSERT(); sin = (const struct sockaddr_in *)sa; CK_LIST_FOREACH(sc, &ME_SRCHASH(sin->sin_addr.s_addr), srchash) { if (sc->me_src.s_addr != sin->sin_addr.s_addr) continue; me_set_running(sc); } } static int me_set_tunnel(struct me_softc *sc, in_addr_t src, in_addr_t dst) { struct me_softc *tmp; sx_assert(&me_ioctl_sx, SA_XLOCKED); if (V_me_hashtbl == NULL) { V_me_hashtbl = me_hashinit(); V_me_srchashtbl = me_hashinit(); } if (sc->me_src.s_addr == src && sc->me_dst.s_addr == dst) return (0); CK_LIST_FOREACH(tmp, &ME_HASH(src, dst), chain) { if (tmp == sc) continue; if (tmp->me_src.s_addr == src && tmp->me_dst.s_addr == dst) return (EADDRNOTAVAIL); } me_delete_tunnel(sc); sc->me_dst.s_addr = dst; sc->me_src.s_addr = src; CK_LIST_INSERT_HEAD(&ME_HASH(src, dst), sc, chain); CK_LIST_INSERT_HEAD(&ME_SRCHASH(src), sc, srchash); me_set_running(sc); if_link_state_change(ME2IFP(sc), LINK_STATE_UP); return (0); } static void me_delete_tunnel(struct me_softc *sc) { sx_assert(&me_ioctl_sx, SA_XLOCKED); if (ME_READY(sc)) { CK_LIST_REMOVE(sc, chain); CK_LIST_REMOVE(sc, srchash); ME_WAIT(); sc->me_src.s_addr = 0; sc->me_dst.s_addr = 0; ME2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING; if_link_state_change(ME2IFP(sc), LINK_STATE_DOWN); } } static uint16_t me_in_cksum(uint16_t *p, int nwords) { uint32_t sum = 0; while (nwords-- > 0) sum += *p++; sum = (sum >> 16) + (sum & 0xffff); sum += (sum >> 16); return (~sum); } static int me_input(struct mbuf *m, int off, int proto, void *arg) { struct me_softc *sc = arg; struct mobhdr *mh; struct ifnet *ifp; struct ip *ip; int hlen; NET_EPOCH_ASSERT(); ifp = ME2IFP(sc); /* checks for short packets */ hlen = sizeof(struct mobhdr); if (m->m_pkthdr.len < sizeof(struct ip) + hlen) hlen -= sizeof(struct in_addr); if (m->m_len < 
sizeof(struct ip) + hlen) m = m_pullup(m, sizeof(struct ip) + hlen); if (m == NULL) goto drop; mh = (struct mobhdr *)mtodo(m, sizeof(struct ip)); /* check for wrong flags */ if (mh->mob_flags & (~MOB_FLAGS_SP)) { m_freem(m); goto drop; } if (mh->mob_flags) { if (hlen != sizeof(struct mobhdr)) { m_freem(m); goto drop; } } else hlen = sizeof(struct mobhdr) - sizeof(struct in_addr); /* check mobile header checksum */ if (me_in_cksum((uint16_t *)mh, hlen / sizeof(uint16_t)) != 0) { m_freem(m); goto drop; } #ifdef MAC mac_ifnet_create_mbuf(ifp, m); #endif ip = mtod(m, struct ip *); ip->ip_dst = mh->mob_dst; ip->ip_p = mh->mob_proto; ip->ip_sum = 0; ip->ip_len = htons(m->m_pkthdr.len - hlen); if (mh->mob_flags) ip->ip_src = mh->mob_src; memmove(mtodo(m, hlen), ip, sizeof(struct ip)); m_adj(m, hlen); m_clrprotoflags(m); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); M_SETFIB(m, ifp->if_fib); hlen = AF_INET; BPF_MTAP2(ifp, &hlen, sizeof(hlen), m); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); if ((ifp->if_flags & IFF_MONITOR) != 0) m_freem(m); else netisr_dispatch(NETISR_IP, m); return (IPPROTO_DONE); drop: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return (IPPROTO_DONE); } static int me_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro __unused) { uint32_t af; if (dst->sa_family == AF_UNSPEC) bcopy(dst->sa_data, &af, sizeof(af)); else af = dst->sa_family; m->m_pkthdr.csum_data = af; return (ifp->if_transmit(ifp, m)); } #define MTAG_ME 1414491977 static int me_transmit(struct ifnet *ifp, struct mbuf *m) { ME_RLOCK_TRACKER; struct mobhdr mh; struct me_softc *sc; struct ip *ip; uint32_t af; int error, hlen, plen; ME_RLOCK(); #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error != 0) goto drop; #endif error = ENETDOWN; sc = ifp->if_softc; if (sc == NULL || !ME_READY(sc) || (ifp->if_flags & IFF_MONITOR) != 0 || (ifp->if_flags & IFF_UP) == 
0 || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (error = if_tunnel_check_nesting(ifp, m, MTAG_ME, V_max_me_nesting)) != 0) { m_freem(m); goto drop; } af = m->m_pkthdr.csum_data; if (af != AF_INET) { error = EAFNOSUPPORT; m_freem(m); goto drop; } if (m->m_len < sizeof(struct ip)) m = m_pullup(m, sizeof(struct ip)); if (m == NULL) { error = ENOBUFS; goto drop; } ip = mtod(m, struct ip *); /* Fragmented datagramms shouldn't be encapsulated */ if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) { error = EINVAL; m_freem(m); goto drop; } mh.mob_proto = ip->ip_p; mh.mob_src = ip->ip_src; mh.mob_dst = ip->ip_dst; if (in_hosteq(sc->me_src, ip->ip_src)) { hlen = sizeof(struct mobhdr) - sizeof(struct in_addr); mh.mob_flags = 0; } else { hlen = sizeof(struct mobhdr); mh.mob_flags = MOB_FLAGS_SP; } BPF_MTAP2(ifp, &af, sizeof(af), m); plen = m->m_pkthdr.len; ip->ip_src = sc->me_src; ip->ip_dst = sc->me_dst; m->m_flags &= ~(M_BCAST|M_MCAST); M_SETFIB(m, sc->me_fibnum); M_PREPEND(m, hlen, M_NOWAIT); if (m == NULL) { error = ENOBUFS; goto drop; } if (m->m_len < sizeof(struct ip) + hlen) m = m_pullup(m, sizeof(struct ip) + hlen); if (m == NULL) { error = ENOBUFS; goto drop; } memmove(mtod(m, void *), mtodo(m, hlen), sizeof(struct ip)); ip = mtod(m, struct ip *); ip->ip_len = htons(m->m_pkthdr.len); ip->ip_p = IPPROTO_MOBILE; ip->ip_sum = 0; mh.mob_csum = 0; mh.mob_csum = me_in_cksum((uint16_t *)&mh, hlen / sizeof(uint16_t)); bcopy(&mh, mtodo(m, sizeof(struct ip)), hlen); error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL); drop: if (error) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if_inc_counter(ifp, IFCOUNTER_OBYTES, plen); } ME_RUNLOCK(); return (error); } static void me_qflush(struct ifnet *ifp __unused) { } static const struct srcaddrtab *me_srcaddrtab = NULL; static const struct encaptab *ecookie = NULL; static const struct encap_config me_encap_cfg = { .proto = IPPROTO_MOBILE, .min_length = sizeof(struct ip) + 
sizeof(struct mobhdr) - sizeof(in_addr_t), .exact_match = ENCAP_DRV_LOOKUP, .lookup = me_lookup, .input = me_input }; static int memodevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: me_srcaddrtab = ip_encap_register_srcaddr(me_srcaddr, NULL, M_WAITOK); ecookie = ip_encap_attach(&me_encap_cfg, NULL, M_WAITOK); break; case MOD_UNLOAD: ip_encap_detach(ecookie); ip_encap_unregister_srcaddr(me_srcaddrtab); break; default: return (EOPNOTSUPP); } return (0); } static moduledata_t me_mod = { "if_me", memodevent, 0 }; DECLARE_MODULE(if_me, me_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_VERSION(if_me, 1);