Index: projects/release-pkg/lib/libvgl/text.c =================================================================== --- projects/release-pkg/lib/libvgl/text.c (revision 297926) +++ projects/release-pkg/lib/libvgl/text.c (revision 297927) @@ -1,245 +1,245 @@ /*- * Copyright (c) 1991-1997 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include "vgl.h" -static VGLText *VGLTextFont = 0; +static VGLText *VGLTextFont; extern byte VGLFont[]; int VGLTextSetFontFile(char *filename) { FILE *fd; if (VGLTextFont) { if (VGLTextFont->BitmapArray != VGLFont) free (VGLTextFont->BitmapArray); free(VGLTextFont); } if ((VGLTextFont=(VGLText*)malloc(sizeof(VGLText))) == (VGLText*)0) return 1; if (filename==NULL) { VGLTextFont->Width = 8; VGLTextFont->Height = 8; VGLTextFont->BitmapArray = VGLFont; } else { if ((fd=fopen(filename, "r"))==(FILE*)0) return 1; fread(&VGLTextFont->Width, 1 , 1, fd); fread(&VGLTextFont->Height, 1 , 1, fd); VGLTextFont->BitmapArray = (byte*)malloc(256*((VGLTextFont->Width + 7)/8)*VGLTextFont->Height); fread(VGLTextFont->BitmapArray, 1, (256*VGLTextFont->Width* VGLTextFont->Height), fd); fclose(fd); } return 0; } void VGLBitmapPutChar(VGLBitmap *Object, int x, int y, byte ch, byte fgcol, byte bgcol, int fill, int dir) { int lin, bit; for(lin = 0; lin < VGLTextFont->Height; lin++) { for(bit = 0; bit < VGLTextFont->Width; bit++) { if (VGLTextFont->BitmapArray[((ch*VGLTextFont->Height)+lin)]&(1<Width), y, str[pos], fgcol, bgcol, fill, dir); break; case 1: VGLBitmapPutChar(Object, x, y-(pos*VGLTextFont->Width), str[pos], fgcol, bgcol, fill, dir); break; case 2: VGLBitmapPutChar(Object, x-(pos*VGLTextFont->Width), y, str[pos], fgcol, bgcol, fill, dir); break; case 3: VGLBitmapPutChar(Object, x, y+(pos*VGLTextFont->Width), str[pos], fgcol, bgcol, fill, dir); break; case 4: VGLBitmapPutChar(Object, x+(pos*VGLTextFont->Width), y-(pos*VGLTextFont->Width), str[pos], fgcol, bgcol, fill, dir); break; } } } byte VGLFont[] = { 0,0,0,0,0,0,0,0,126,129,165,129,189,153,129,126,126,255,219,255,195,231, 255,126,108,254,254,254,124,56,16,0,16,56,124,254,124,56,16,0,56,124,56, 
254,254,124,56,124,16,16,56,124,254,124,56,124,0,0,24,60,60,24,0,0,255, 255,231,195,195,231,255,255,0,60,102,66,66,102,60,0,255,195,153,189,189, 153,195,255,15,7,15,125,204,204,204,120,60,102,102,102,60,24,126,24,63, 51,63,48,48,112,240,224,127,99,127,99,99,103,230,192,153,90,60,231,231, 60,90,153,128,224,248,254,248,224,128,0,2,14,62,254,62,14,2,0,24,60,126, 24,24,126,60,24,102,102,102,102,102,0,102,0,127,219,219,123,27,27,27,0, 62,99,56,108,108,56,204,120,0,0,0,0,126,126,126,0,24,60,126,24,126,60,24, 255,24,60,126,24,24,24,24,0,24,24,24,24,126,60,24,0,0,24,12,254,12,24,0, 0,0,48,96,254,96,48,0,0,0,0,192,192,192,254,0,0,0,36,102,255,102,36,0,0, 0,24,60,126,255,255,0,0,0,255,255,126,60,24,0,0,0,0,0,0,0,0,0,0,48,120, 120,48,48,0,48,0,108,108,108,0,0,0,0,0,108,108,254,108,254,108,108,0,48, 124,192,120,12,248,48,0,0,198,204,24,48,102,198,0,56,108,56,118,220,204, 118,0,96,96,192,0,0,0,0,0,24,48,96,96,96,48,24,0,96,48,24,24,24,48,96,0, 0,102,60,255,60,102,0,0,0,48,48,252,48,48,0,0,0,0,0,0,0,48,48,96,0,0,0, 252,0,0,0,0,0,0,0,0,0,48,48,0,6,12,24,48,96,192,128,0,124,198,206,222,246, 230,124,0,48,112,48,48,48,48,252,0,120,204,12,56,96,204,252,0,120,204,12, 56,12,204,120,0,28,60,108,204,254,12,30,0,252,192,248,12,12,204,120,0,56, 96,192,248,204,204,120,0,252,204,12,24,48,48,48,0,120,204,204,120,204,204, 120,0,120,204,204,124,12,24,112,0,0,48,48,0,0,48,48,0,0,48,48,0,0,48,48, 96,24,48,96,192,96,48,24,0,0,0,252,0,0,252,0,0,96,48,24,12,24,48,96,0,120, 204,12,24,48,0,48,0,124,198,222,222,222,192,120,0,48,120,204,204,252,204, 204,0,252,102,102,124,102,102,252,0,60,102,192,192,192,102,60,0,248,108, 102,102,102,108,248,0,254,98,104,120,104,98,254,0,254,98,104,120,104,96, 240,0,60,102,192,192,206,102,62,0,204,204,204,252,204,204,204,0,120,48, 48,48,48,48,120,0,30,12,12,12,204,204,120,0,230,102,108,120,108,102,230, 0,240,96,96,96,98,102,254,0,198,238,254,254,214,198,198,0,198,230,246,222, 206,198,198,0,56,108,198,198,198,108,56,0,252,102,102,124,96,96,240,0,120, 204,204,204,220,120,28,0,252,102,102,124,108,102,230,0,120,204,224,112, 28,204,120,0,252,180,48,48,48,48,120,0,204,204,204,204,204,204,252,0,204, 204,204,204,204,120,48,0,198,198,198,214,254,238,198,0,198,198,108,56,56, 108,198,0,204,204,204,120,48,48,120,0,254,198,140,24,50,102,254,0,120,96, 96,96,96,96,120,0,192,96,48,24,12,6,2,0,120,24,24,24,24,24,120,0,16,56, 108,198,0,0,0,0,0,0,0,0,0,0,0,255,48,48,24,0,0,0,0,0,0,0,120,12,124,204, 118,0,224,96,96,124,102,102,220,0,0,0,120,204,192,204,120,0,28,12,12,124, 204,204,118,0,0,0,120,204,252,192,120,0,56,108,96,240,96,96,240,0,0,0,118, 204,204,124,12,248,224,96,108,118,102,102,230,0,48,0,112,48,48,48,120,0, 12,0,12,12,12,204,204,120,224,96,102,108,120,108,230,0,112,48,48,48,48, 48,120,0,0,0,204,254,254,214,198,0,0,0,248,204,204,204,204,0,0,0,120,204, 204,204,120,0,0,0,220,102,102,124,96,240,0,0,118,204,204,124,12,30,0,0, 220,118,102,96,240,0,0,0,124,192,120,12,248,0,16,48,124,48,48,52,24,0,0, 0,204,204,204,204,118,0,0,0,204,204,204,120,48,0,0,0,198,214,254,254,108, 0,0,0,198,108,56,108,198,0,0,0,204,204,204,124,12,248,0,0,252,152,48,100, 252,0,28,48,48,224,48,48,28,0,24,24,24,0,24,24,24,0,224,48,48,28,48,48, 224,0,118,220,0,0,0,0,0,0,0,16,56,108,198,198,254,0,0,0,0,0,0,0,0,0,0,0, 60,126,255,126,24,0,170,85,85,170,170,85,85,170,68,68,68,68,31,4,4,4,124, 64,64,64,31,16,16,16,56,68,68,56,30,17,20,19,64,64,64,124,31,16,16,16,56, 108,56,0,0,0,0,0,0,0,24,24,24,24,126,0,68,100,76,68,16,16,16,31,68,68,40, 16,31,4,4,4,24,24,24,24,248,0,0,0,0,0,0,0,248,24,24,24,0,0,0,0,31,24,24, 
24,24,24,24,24,31,0,0,0,24,24,24,24,255,24,24,24,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,24,24,24, 24,31,24,24,24,24,24,24,24,248,24,24,24,24,24,24,24,255,0,0,0,0,0,0,0,255, 24,24,24,24,24,24,24,24,24,24,24,0,12,48,96,24,12,126,0,0,48,12,6,24,48, 126,0,0,0,3,62,54,54,108,0,0,0,4,126,16,126,64,0,0,28,48,48,48,48,126,0, 0,0,0,24,0,0,0,0,0,0,0,0,0,0,0,0,48,0,48,48,120,120,48,0,0,0,16,124,192, 192,124,16,0,56,96,96,240,96,252,0,0,195,60,102,102,60,195,0,0,204,204, 120,48,252,48,0,24,24,24,0,24,24,24,0,126,192,124,198,124,6,252,0,198,0, 0,0,0,0,0,0,124,130,186,162,186,130,124,0,28,6,30,34,31,63,0,0,0,51,102, 204,102,51,0,0,0,254,6,0,0,0,0,0,0,0,0,0,0,0,0,0,124,130,186,178,170,130, 124,0,254,0,0,0,0,0,0,0,56,108,56,0,0,0,0,0,0,16,124,16,0,124,0,0,28,54, 6,24,62,0,0,0,30,2,14,2,30,0,0,0,24,48,0,0,0,0,0,0,0,0,204,204,204,204, 118,192,126,202,202,126,10,10,10,0,0,0,0,24,0,0,0,0,0,0,0,0,0,0,24,48,6, 14,6,6,6,0,0,0,14,17,17,17,14,31,0,0,0,204,102,51,102,204,0,0,96,224,102, 108,51,103,15,3,96,224,102,108,54,106,4,14,240,32,150,108,51,103,15,3,48, 0,48,96,192,204,120,0,24,12,48,120,204,252,204,0,96,192,48,120,204,252, 204,0,120,132,48,120,204,252,204,0,102,152,48,120,204,252,204,0,204,0,48, 120,204,252,204,0,48,72,48,120,204,252,204,0,62,120,152,156,248,152,158, 0,60,102,192,192,192,102,28,48,48,24,254,98,120,98,254,0,24,48,254,98,120, 98,254,0,56,68,254,98,120,98,254,0,102,0,254,98,120,98,254,0,96,48,120, 48,48,48,120,0,24,48,120,48,48,48,120,0,120,132,120,48,48,48,120,0,204, 0,120,48,48,48,120,0,120,108,102,246,102,108,120,0,102,152,230,246,222, 206,198,0,48,24,124,198,198,198,124,0,24,48,124,198,198,198,124,0,56,68, 124,198,198,198,124,0,102,152,124,198,198,198,124,0,198,0,124,198,198,198, 124,0,0,198,108,56,56,108,198,0,6,124,206,154,178,230,120,192,96,48,204, 204,204,204,252,0,24,48,204,204,204,204,252,0,120,132,204,204,204,204,252, 0,204,0,204,204,204,204,252,0,24,48,204,204,120,48,120,0,96,120,108,120, 96,96,96,0,120,204,196,220,198,198,220,192,48,24,120,12,124,204,118,0,24, 48,120,12,124,204,118,0,120,132,120,12,124,204,118,0,102,152,120,12,124, 204,118,0,204,0,120,12,124,204,118,0,48,72,56,12,124,204,118,0,0,0,236, 50,126,176,110,0,0,0,60,102,192,102,28,48,48,24,120,204,252,192,120,0,24, 48,120,204,252,192,120,0,120,132,120,204,252,192,120,0,204,0,120,204,252, 192,120,0,96,48,0,112,48,48,120,0,24,48,0,112,48,48,120,0,112,136,0,112, 48,48,120,0,204,0,0,112,48,48,120,0,108,56,108,12,108,204,120,0,102,152, 248,204,204,204,204,0,96,48,0,124,198,198,124,0,24,48,0,124,198,198,124, 0,56,68,0,124,198,198,124,0,102,152,0,124,198,198,124,0,198,0,0,124,198, 198,124,0,0,0,24,0,126,0,24,0,0,0,6,124,222,246,124,192,96,48,0,204,204, 204,118,0,24,48,0,204,204,204,118,0,48,72,0,204,204,204,118,0,204,0,0,204, 204,204,118,0,24,48,204,204,204,124,12,248,224,120,108,102,108,120,224, 0,204,0,204,204,204,124,12,248 }; Index: projects/release-pkg/sys/arm/broadcom/bcm2835/bcm2835_audio.c =================================================================== --- projects/release-pkg/sys/arm/broadcom/bcm2835/bcm2835_audio.c (revision 297926) +++ projects/release-pkg/sys/arm/broadcom/bcm2835/bcm2835_audio.c (revision 297927) @@ -1,886 +1,895 @@ /*- * Copyright (c) 2015 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_snd.h"
#endif

#include #include
#include "mixer_if.h"

#include "interface/compat/vchi_bsd.h"
#include "interface/vchi/vchi.h"
#include "interface/vchiq_arm/vchiq.h"
#include "vc_vchi_audioserv_defs.h"

SND_DECLARE_FILE("$FreeBSD$");

#define DEST_AUTO		0
#define DEST_HEADPHONES		1
#define DEST_HDMI		2

#define VCHIQ_AUDIO_PACKET_SIZE	4000
#define VCHIQ_AUDIO_BUFFER_SIZE	128000
+#define VCHIQ_AUDIO_PREBUFFER	10 /* Number of pre-buffered audio messages */

#define VCHIQ_AUDIO_MAX_VOLUME
/* volume in terms of 0.01dB */
#define VCHIQ_AUDIO_VOLUME_MIN	-10239
#define VCHIQ_AUDIO_VOLUME(db100)	(uint32_t)(-((db100) << 8)/100)

/* dB levels with 5% volume step */
static int db_levels[] = {
	VCHIQ_AUDIO_VOLUME_MIN, -4605, -3794, -3218, -2772,
	-2407, -2099, -1832, -1597, -1386,
	-1195, -1021, -861, -713, -575,
	-446, -325, -210, -102, 0,
};

static uint32_t bcm2835_audio_playfmt[] = {
	SND_FORMAT(AFMT_U8, 1, 0),
	SND_FORMAT(AFMT_U8, 2, 0),
	SND_FORMAT(AFMT_S8, 1, 0),
	SND_FORMAT(AFMT_S8, 2, 0),
	SND_FORMAT(AFMT_S16_LE, 1, 0),
	SND_FORMAT(AFMT_S16_LE, 2, 0),
	SND_FORMAT(AFMT_U16_LE, 1, 0),
	SND_FORMAT(AFMT_U16_LE, 2, 0),
	0
};

static struct pcmchan_caps bcm2835_audio_playcaps =
    {8000, 48000, bcm2835_audio_playfmt, 0};

struct bcm2835_audio_info;

#define PLAYBACK_IDLE		0
#define PLAYBACK_STARTING	1
#define PLAYBACK_PLAYING	2
#define PLAYBACK_STOPPING	3

struct bcm2835_audio_chinfo {
	struct bcm2835_audio_info *parent;
	struct pcm_channel *channel;
	struct snd_dbuf *buffer;
	uint32_t fmt, spd, blksz;

	uint32_t complete_pos;
	uint32_t free_buffer;
	uint32_t buffered_ptr;
	int playback_state;
+	int prebuffered;
};

struct bcm2835_audio_info {
	device_t dev;
	unsigned int bufsz;
	struct bcm2835_audio_chinfo pch;
	uint32_t dest, volume;
	struct mtx *lock;
	struct intr_config_hook intr_hook;

	/* VCHI data */
	struct mtx vchi_lock;
	VCHI_INSTANCE_T vchi_instance;
	VCHI_CONNECTION_T *vchi_connection;
	VCHI_SERVICE_HANDLE_T vchi_handle;

	struct mtx data_lock;
	struct cv data_cv;

	/* Unloading module */
	int unloading;
};

#define bcm2835_audio_lock(_ess)	snd_mtxlock((_ess)->lock)
#define bcm2835_audio_unlock(_ess)	snd_mtxunlock((_ess)->lock)
#define bcm2835_audio_lock_assert(_ess)	snd_mtxassert((_ess)->lock)

#define VCHIQ_VCHI_LOCK(sc)	mtx_lock(&(sc)->vchi_lock)
#define VCHIQ_VCHI_UNLOCK(sc)	mtx_unlock(&(sc)->vchi_lock)

static const char *
dest_description(uint32_t dest)
{
	switch (dest) {
	case DEST_AUTO:
		return "AUTO";
		break;
	case DEST_HEADPHONES:
		return "HEADPHONES";
		break;
	case DEST_HDMI:
		return "HDMI";
		break;
	default:
		return "UNKNOWN";
		break;
	}
}
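Editor's note: a minimal standalone sketch (not driver code) of how the db_levels[] table and VCHIQ_AUDIO_VOLUME() above fit together. The mixer value is 0..99, indexed into the table in 5% steps (20 entries of attenuation in hundredths of a dB); the firmware then receives the attenuation negated and converted to 1/256 dB units by the shift-and-divide in the macro. The helper name volume_to_vchiq() is hypothetical.

#include <stdint.h>

#define VCHIQ_AUDIO_VOLUME_MIN -10239
#define VCHIQ_AUDIO_VOLUME(db100) (uint32_t)(-((db100) << 8) / 100)

static const int db_levels_sketch[] = {
	VCHIQ_AUDIO_VOLUME_MIN, -4605, -3794, -3218, -2772, -2407, -2099,
	-1832, -1597, -1386, -1195, -1021, -861, -713, -575, -446, -325,
	-210, -102, 0,
};

static uint32_t
volume_to_vchiq(unsigned vol)	/* vol: 0..99, like sc->volume */
{
	int db100;

	if (vol > 99)
		vol = 99;	/* same clamp as bcm2835_audio_update_controls() */
	db100 = db_levels_sketch[vol / 5];	/* 5% steps, 20 entries */
	/*
	 * Example: -2099 (-20.99 dB) becomes (2099 << 8) / 100 = 5373,
	 * i.e. roughly 20.99 * 256: attenuation in 1/256 dB units.
	 */
	return (VCHIQ_AUDIO_VOLUME(db100));
}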
static
void bcm2835_audio_callback(void *param, const VCHI_CALLBACK_REASON_T reason,
    void *msg_handle)
{
	struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)param;
	int32_t status;
	uint32_t msg_len;
	VC_AUDIO_MSG_T m;

	if (reason != VCHI_CALLBACK_MSG_AVAILABLE)
		return;

	status = vchi_msg_dequeue(sc->vchi_handle, &m, sizeof m, &msg_len,
	    VCHI_FLAGS_NONE);
	if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
		if (m.u.result.success) {
			device_printf(sc->dev, "msg type %08x failed\n",
			    m.type);
		}
	} else if (m.type == VC_AUDIO_MSG_TYPE_COMPLETE) {
		struct bcm2835_audio_chinfo *ch = m.u.complete.cookie;

		int count = m.u.complete.count & 0xffff;
		int perr = (m.u.complete.count & (1U << 30)) != 0;

		ch->complete_pos = (ch->complete_pos + count) %
		    sndbuf_getsize(ch->buffer);
		ch->free_buffer += count;
+		chn_intr(sc->pch.channel);
-		if (perr || ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE) {
-			chn_intr(ch->channel);
+		if (perr || ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)
			cv_signal(&sc->data_cv);
-		}
	} else
		printf("%s: unknown m.type: %d\n", __func__, m.type);
}

/* VCHIQ stuff */
static void
bcm2835_audio_init(struct bcm2835_audio_info *sc)
{
	int status;

	/* Initialize and create a VCHI connection */
	status = vchi_initialise(&sc->vchi_instance);
	if (status != 0) {
		printf("vchi_initialise failed: %d\n", status);
		return;
	}

	status = vchi_connect(NULL, 0, sc->vchi_instance);
	if (status != 0) {
		printf("vchi_connect failed: %d\n", status);
		return;
	}

	SERVICE_CREATION_T params = {
		VCHI_VERSION_EX(VC_AUDIOSERV_VER, VC_AUDIOSERV_MIN_VER),
		VC_AUDIO_SERVER_NAME,	/* 4cc service code */
		sc->vchi_connection,	/* passed in fn pointers */
		0,			/* rx fifo size */
		0,			/* tx fifo size */
		bcm2835_audio_callback,	/* service callback */
		sc,			/* service callback parameter */
		1, 1, 0			/* want crc check on bulk transfers */
	};

	status = vchi_service_open(sc->vchi_instance, &params,
	    &sc->vchi_handle);

	if (status == 0)
		/* Finished with the service for now */
		vchi_service_release(sc->vchi_handle);
	else
		sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;
}

static void
bcm2835_audio_release(struct bcm2835_audio_info *sc)
{
	int success;

	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);
		success = vchi_service_close(sc->vchi_handle);
		if (success != 0)
			printf("vchi_service_close failed: %d\n", success);
		sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;
	}

	vchi_disconnect(sc->vchi_instance);
}

static void
bcm2835_audio_reset_channel(struct bcm2835_audio_chinfo *ch)
{

	ch->free_buffer = VCHIQ_AUDIO_BUFFER_SIZE;
	ch->playback_state = 0;
	ch->buffered_ptr = 0;
	ch->complete_pos = 0;
+	ch->prebuffered = 0;

	sndbuf_reset(ch->buffer);
}

static void
bcm2835_audio_start(struct bcm2835_audio_chinfo *ch)
{
	VC_AUDIO_MSG_T m;
	int ret;
	struct bcm2835_audio_info *sc = ch->parent;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		bcm2835_audio_reset_channel(ch);

		m.type = VC_AUDIO_MSG_TYPE_START;
		ret = vchi_msg_queue(sc->vchi_handle, &m, sizeof m,
		    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n",
			    __func__, ret);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}
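Editor's note: every control-plane operation in this driver (open, start, stop, config, control) repeats the same lock/use/queue/release sequence, so it can be read as one pattern. A hedged sketch, using only calls that appear in this file; the helper name bcm2835_audio_send_msg() is hypothetical and not part of the commit.

static int
bcm2835_audio_send_msg(struct bcm2835_audio_info *sc, VC_AUDIO_MSG_T *m)
{
	int ret = 0;

	VCHIQ_VCHI_LOCK(sc);			/* serialize VCHI users */
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);	/* take a use count */
		ret = vchi_msg_queue(sc->vchi_handle, m, sizeof(*m),
		    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n",
			    __func__, ret);
		vchi_service_release(sc->vchi_handle);	/* drop use count */
	}
	VCHIQ_VCHI_UNLOCK(sc);
	return (ret);
}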
static void
bcm2835_audio_stop(struct bcm2835_audio_chinfo *ch)
{
	VC_AUDIO_MSG_T m;
	int ret;
	struct bcm2835_audio_info *sc = ch->parent;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		m.type = VC_AUDIO_MSG_TYPE_STOP;
		m.u.stop.draining = 0;
		ret = vchi_msg_queue(sc->vchi_handle, &m, sizeof m,
		    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n",
			    __func__, ret);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}

static void
bcm2835_audio_open(struct bcm2835_audio_info *sc)
{
	VC_AUDIO_MSG_T m;
	int ret;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		m.type = VC_AUDIO_MSG_TYPE_OPEN;
		ret = vchi_msg_queue(sc->vchi_handle, &m, sizeof m,
		    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n",
			    __func__, ret);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}

static void
bcm2835_audio_update_controls(struct bcm2835_audio_info *sc)
{
	VC_AUDIO_MSG_T m;
	int ret, db;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		m.type = VC_AUDIO_MSG_TYPE_CONTROL;
		m.u.control.dest = sc->dest;
		if (sc->volume > 99)
			sc->volume = 99;
		db = db_levels[sc->volume/5];
		m.u.control.volume = VCHIQ_AUDIO_VOLUME(db);

		ret = vchi_msg_queue(sc->vchi_handle, &m, sizeof m,
		    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n",
			    __func__, ret);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}

static void
bcm2835_audio_update_params(struct bcm2835_audio_info *sc,
    struct bcm2835_audio_chinfo *ch)
{
	VC_AUDIO_MSG_T m;
	int ret;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		m.type = VC_AUDIO_MSG_TYPE_CONFIG;
		m.u.config.channels = AFMT_CHANNEL(ch->fmt);
		m.u.config.samplerate = ch->spd;
		m.u.config.bps = AFMT_BIT(ch->fmt);

		ret = vchi_msg_queue(sc->vchi_handle, &m, sizeof m,
		    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n",
			    __func__, ret);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}

static __inline uint32_t
vchiq_unbuffered_bytes(struct bcm2835_audio_chinfo *ch)
{
	uint32_t size, ready, readyptr, readyend;

	size = sndbuf_getsize(ch->buffer);
	readyptr = sndbuf_getreadyptr(ch->buffer);
	ready = sndbuf_getready(ch->buffer);

	readyend = readyptr + ready;
	/* Normal case */
	if (ch->buffered_ptr >= readyptr) {
		if (readyend > ch->buffered_ptr)
			return readyend - ch->buffered_ptr;
		else
			return 0;
	} else { /* buffered_ptr overflow */
		if (readyend > ch->buffered_ptr + size)
			return readyend - ch->buffered_ptr - size;
		else
			return 0;
	}
}
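Editor's note: a worked example (not driver code) of the ring arithmetic in vchiq_unbuffered_bytes() above. The sound buffer is a ring: buffered_ptr chases the ready region [readyptr, readyptr + ready), and the helper returns how many ready bytes have not yet been queued to VCHI, including the case where the writer has already wrapped past the end of the ring.

#include <stdio.h>
#include <stdint.h>

static uint32_t
unbuffered_bytes(uint32_t size, uint32_t readyptr, uint32_t ready,
    uint32_t buffered_ptr)
{
	uint32_t readyend = readyptr + ready;	/* may exceed size */

	if (buffered_ptr >= readyptr)		/* normal case */
		return (readyend > buffered_ptr ?
		    readyend - buffered_ptr : 0);
	/* buffered_ptr already wrapped past the end of the ring */
	return (readyend > buffered_ptr + size ?
	    readyend - buffered_ptr - size : 0);
}

int
main(void)
{
	/*
	 * 128000-byte ring (VCHIQ_AUDIO_BUFFER_SIZE): ready data wraps
	 * from 120000 for 16000 bytes, writer already wrapped to 2000,
	 * so [2000, 8000) = 6000 bytes remain unqueued.
	 */
	printf("%u\n", unbuffered_bytes(128000, 120000, 16000, 2000));
	return (0);
}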
static void
bcm2835_audio_write_samples(struct bcm2835_audio_chinfo *ch)
{
	struct bcm2835_audio_info *sc = ch->parent;
	VC_AUDIO_MSG_T m;
	void *buf;
	uint32_t count, size;
	int ret;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle == VCHIQ_SERVICE_HANDLE_INVALID) {
		VCHIQ_VCHI_UNLOCK(sc);
		return;
	}

	vchi_service_use(sc->vchi_handle);

	size = sndbuf_getsize(ch->buffer);
	count = vchiq_unbuffered_bytes(ch);
	buf = (uint8_t*)sndbuf_getbuf(ch->buffer) + ch->buffered_ptr;

	if (ch->buffered_ptr + count > size)
		count = size - ch->buffered_ptr;

	if (count < VCHIQ_AUDIO_PACKET_SIZE)
		goto done;

	count = min(count, ch->free_buffer);
	count -= count % VCHIQ_AUDIO_PACKET_SIZE;

	m.type = VC_AUDIO_MSG_TYPE_WRITE;
	m.u.write.count = count;
	m.u.write.max_packet = VCHIQ_AUDIO_PACKET_SIZE;
	m.u.write.callback = NULL;
	m.u.write.cookie = ch;

	if (buf)
		m.u.write.silence = 0;
	else
		m.u.write.silence = 1;

	ret = vchi_msg_queue(sc->vchi_handle, &m, sizeof m,
	    VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
	if (ret != 0)
		printf("%s: vchi_msg_queue failed (err %d)\n",
		    __func__, ret);

	if (buf) {
		while (count > 0) {
			int bytes = MIN((int)m.u.write.max_packet, (int)count);
			ch->free_buffer -= bytes;
			ch->buffered_ptr += bytes;
			ch->buffered_ptr = ch->buffered_ptr % size;
			ret = vchi_msg_queue(sc->vchi_handle,
			    buf, bytes, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);
			if (ret != 0)
				printf("%s: vchi_msg_queue failed: %d\n",
				    __func__, ret);
			buf = (char *)buf + bytes;
			count -= bytes;
		}
	}
done:

	vchi_service_release(sc->vchi_handle);
	VCHIQ_VCHI_UNLOCK(sc);
}

static void
bcm2835_audio_worker(void *data)
{
	struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)data;
	struct bcm2835_audio_chinfo *ch = &sc->pch;

	mtx_lock(&sc->data_lock);
	while(1) {
		if (sc->unloading)
			break;

-		if ((ch->playback_state == PLAYBACK_PLAYING) &&
-		    (vchiq_unbuffered_bytes(ch) >= VCHIQ_AUDIO_PACKET_SIZE)
-		    && (ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)) {
-			bcm2835_audio_write_samples(ch);
-		} else {
-			if (ch->playback_state == PLAYBACK_STOPPING) {
-				bcm2835_audio_reset_channel(&sc->pch);
-				ch->playback_state = PLAYBACK_IDLE;
-			}
-
+		if (ch->playback_state == PLAYBACK_IDLE) {
			cv_wait_sig(&sc->data_cv, &sc->data_lock);
+			continue;
+		}

-			if (ch->playback_state == PLAYBACK_STARTING) {
-				/* Give it initial kick */
-				chn_intr(sc->pch.channel);
+		if (ch->playback_state == PLAYBACK_STOPPING) {
+			bcm2835_audio_reset_channel(&sc->pch);
+			ch->playback_state = PLAYBACK_IDLE;
+			continue;
+		}
+
+		if (ch->free_buffer < vchiq_unbuffered_bytes(ch)) {
+			cv_timedwait_sig(&sc->data_cv, &sc->data_lock, 10);
+			continue;
+		}
+
+
+		bcm2835_audio_write_samples(ch);
+
+		if (ch->playback_state == PLAYBACK_STARTING) {
+			ch->prebuffered++;
+			if (ch->prebuffered == VCHIQ_AUDIO_PREBUFFER) {
+				bcm2835_audio_start(ch);
				ch->playback_state = PLAYBACK_PLAYING;
			}
		}
	}
	mtx_unlock(&sc->data_lock);

	kproc_exit(0);
}

static void
bcm2835_audio_create_worker(struct bcm2835_audio_info *sc)
{
	struct proc *newp;

	if (kproc_create(bcm2835_audio_worker, (void*)sc, &newp, 0, 0,
	    "bcm2835_audio_worker") != 0) {
		printf("failed to create bcm2835_audio_worker\n");
	}
}
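Editor's note: quick arithmetic sketch (not driver code) of how much audio the prebuffering introduced by this commit holds back before the worker sends VC_AUDIO_MSG_TYPE_START. The 48 kHz / 16-bit / stereo rate used below is the top of bcm2835_audio_playcaps; other formats simply prebuffer for longer.

#include <stdio.h>

int
main(void)
{
	/* VCHIQ_AUDIO_PREBUFFER * VCHIQ_AUDIO_PACKET_SIZE */
	unsigned prebuf_bytes = 10 * 4000;
	/* rate * channels * bytes per sample */
	unsigned bytes_per_sec = 48000 * 2 * 2;

	/* Prints: prebuffer: 40000 bytes = 208.3 ms at 48kHz s16 stereo */
	printf("prebuffer: %u bytes = %.1f ms at 48kHz s16 stereo\n",
	    prebuf_bytes, 1000.0 * prebuf_bytes / bytes_per_sec);
	return (0);
}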
/* -------------------------------------------------------------------- */
-/* channel interface for ESS18xx */
+/* channel interface for VCHI audio */
static void *
bcmchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
    struct pcm_channel *c, int dir)
{
	struct bcm2835_audio_info *sc = devinfo;
	struct bcm2835_audio_chinfo *ch = &sc->pch;
	void *buffer;

	if (dir == PCMDIR_REC)
		return NULL;

	ch->parent = sc;
	ch->channel = c;
	ch->buffer = b;

	/* default values */
	ch->spd = 44100;
	ch->fmt = SND_FORMAT(AFMT_S16_LE, 2, 0);
	ch->blksz = VCHIQ_AUDIO_PACKET_SIZE;

	buffer = malloc(sc->bufsz, M_DEVBUF, M_WAITOK | M_ZERO);

	if (sndbuf_setup(ch->buffer, buffer, sc->bufsz) != 0) {
		free(buffer, M_DEVBUF);
		return NULL;
	}

	bcm2835_audio_update_params(sc, ch);

	return ch;
}

static int
bcmchan_free(kobj_t obj, void *data)
{
	struct bcm2835_audio_chinfo *ch = data;
	void *buffer;

	buffer = sndbuf_getbuf(ch->buffer);
	if (buffer)
		free(buffer, M_DEVBUF);

	return (0);
}

static int
bcmchan_setformat(kobj_t obj, void *data, uint32_t format)
{
	struct bcm2835_audio_chinfo *ch = data;
	struct bcm2835_audio_info *sc = ch->parent;

	bcm2835_audio_lock(sc);
	ch->fmt = format;
	bcm2835_audio_update_params(sc, ch);
	bcm2835_audio_unlock(sc);

	return 0;
}

static uint32_t
bcmchan_setspeed(kobj_t obj, void *data, uint32_t speed)
{
	struct bcm2835_audio_chinfo *ch = data;
	struct bcm2835_audio_info *sc = ch->parent;

	bcm2835_audio_lock(sc);
	ch->spd = speed;
	bcm2835_audio_update_params(sc, ch);
	bcm2835_audio_unlock(sc);

	return ch->spd;
}

static uint32_t
bcmchan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
{
	struct bcm2835_audio_chinfo *ch = data;

	return ch->blksz;
}

static int
bcmchan_trigger(kobj_t obj, void *data, int go)
{
	struct bcm2835_audio_chinfo *ch = data;
	struct bcm2835_audio_info *sc = ch->parent;

	if (!PCMTRIG_COMMON(go))
		return (0);

	bcm2835_audio_lock(sc);

	switch (go) {
	case PCMTRIG_START:
-		bcm2835_audio_start(ch);
		ch->playback_state = PLAYBACK_STARTING;
		/* wakeup worker thread */
		cv_signal(&sc->data_cv);
		break;

	case PCMTRIG_STOP:
	case PCMTRIG_ABORT:
-		ch->playback_state = 1;
+		ch->playback_state = PLAYBACK_STOPPING;
		bcm2835_audio_stop(ch);
		break;

	default:
		break;
	}

	bcm2835_audio_unlock(sc);
	return 0;
}

static uint32_t
bcmchan_getptr(kobj_t obj, void *data)
{
	struct bcm2835_audio_chinfo *ch = data;
	struct bcm2835_audio_info *sc = ch->parent;
	uint32_t ret;

	bcm2835_audio_lock(sc);

	ret = ch->complete_pos - (ch->complete_pos % VCHIQ_AUDIO_PACKET_SIZE);

	bcm2835_audio_unlock(sc);

	return ret;
}

static struct pcmchan_caps *
bcmchan_getcaps(kobj_t obj, void *data)
{

	return &bcm2835_audio_playcaps;
}

static kobj_method_t bcmchan_methods[] = {
	KOBJMETHOD(channel_init,		bcmchan_init),
	KOBJMETHOD(channel_free,		bcmchan_free),
	KOBJMETHOD(channel_setformat,		bcmchan_setformat),
	KOBJMETHOD(channel_setspeed,		bcmchan_setspeed),
	KOBJMETHOD(channel_setblocksize,	bcmchan_setblocksize),
	KOBJMETHOD(channel_trigger,		bcmchan_trigger),
	KOBJMETHOD(channel_getptr,		bcmchan_getptr),
	KOBJMETHOD(channel_getcaps,		bcmchan_getcaps),
	KOBJMETHOD_END
};
CHANNEL_DECLARE(bcmchan);

/************************************************************/

static int
bcmmix_init(struct snd_mixer *m)
{

	mix_setdevs(m, SOUND_MASK_VOLUME);

	return (0);
}

static int
bcmmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
{
	struct bcm2835_audio_info *sc = mix_getdevinfo(m);

	switch (dev) {
	case SOUND_MIXER_VOLUME:
		sc->volume = left;
		bcm2835_audio_update_controls(sc);
		break;

	default:
		break;
	}

	return left | (left << 8);
}

static kobj_method_t bcmmixer_methods[] = {
	KOBJMETHOD(mixer_init,		bcmmix_init),
	KOBJMETHOD(mixer_set,		bcmmix_set),
	KOBJMETHOD_END
};

MIXER_DECLARE(bcmmixer);

static int
sysctl_bcm2835_audio_dest(SYSCTL_HANDLER_ARGS)
{
	struct bcm2835_audio_info *sc = arg1;
	int val;
	int err;

	val = sc->dest;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err || !req->newptr) /* error || read request */
		return (err);

	if ((val < 0) || (val > 2))
		return (EINVAL);

	sc->dest = val;
	device_printf(sc->dev, "destination set to %s\n",
	    dest_description(val));
	bcm2835_audio_update_controls(sc);

	return (0);
}

static void
vchi_audio_sysctl_init(struct bcm2835_audio_info *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree_node;
	struct sysctl_oid_list *tree;

	/*
	 * Add system sysctl tree/handlers.
	 */
	ctx = device_get_sysctl_ctx(sc->dev);
	tree_node = device_get_sysctl_tree(sc->dev);
	tree = SYSCTL_CHILDREN(tree_node);
	SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "dest",
	    CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
	    sysctl_bcm2835_audio_dest, "IU", "audio destination, "
	    "0 - auto, 1 - headphones, 2 - HDMI");
}

static void
bcm2835_audio_identify(driver_t *driver, device_t parent)
{

	BUS_ADD_CHILD(parent, 0, "pcm", 0);
}

static int
bcm2835_audio_probe(device_t dev)
{

	device_set_desc(dev, "VCHIQ audio");
	return (BUS_PROBE_DEFAULT);
}

static void
bcm2835_audio_delayed_init(void *xsc)
{
	struct bcm2835_audio_info *sc;
	char status[SND_STATUSLEN];

	sc = xsc;

	config_intrhook_disestablish(&sc->intr_hook);

	bcm2835_audio_init(sc);
	bcm2835_audio_open(sc);
	sc->volume = 75;
	sc->dest = DEST_AUTO;

	if (mixer_init(sc->dev, &bcmmixer_class, sc)) {
		device_printf(sc->dev, "mixer_init failed\n");
		goto no;
	}

	if (pcm_register(sc->dev, sc, 1, 1)) {
		device_printf(sc->dev, "pcm_register failed\n");
		goto no;
	}

	pcm_addchan(sc->dev, PCMDIR_PLAY, &bcmchan_class, sc);
	snprintf(status, SND_STATUSLEN, "at VCHIQ");
	pcm_setstatus(sc->dev, status);

	bcm2835_audio_reset_channel(&sc->pch);
	bcm2835_audio_create_worker(sc);

	vchi_audio_sysctl_init(sc);

no:
	;
}

static int
bcm2835_audio_attach(device_t dev)
{
	struct bcm2835_audio_info *sc;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->dev = dev;
	sc->bufsz = VCHIQ_AUDIO_BUFFER_SIZE;

	sc->lock = snd_mtxcreate(device_get_nameunit(dev),
	    "bcm2835_audio softc");

	mtx_init(&sc->vchi_lock, "bcm2835_audio", "vchi_lock", MTX_DEF);
	mtx_init(&sc->data_lock, "data_mtx", "data_mtx", MTX_DEF);
	cv_init(&sc->data_cv, "data_cv");
	sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;

	/*
	 * We need interrupts enabled for VCHI to work properly,
	 * so delay initialization until it happens.
	 */
	sc->intr_hook.ich_func = bcm2835_audio_delayed_init;
	sc->intr_hook.ich_arg = sc;

	if (config_intrhook_establish(&sc->intr_hook) != 0)
		goto no;

	return 0;

no:
	return ENXIO;
}

static int
bcm2835_audio_detach(device_t dev)
{
	int r;
	struct bcm2835_audio_info *sc;
	sc = pcm_getdevinfo(dev);

	/* Stop worker thread */
	sc->unloading = 1;
	cv_signal(&sc->data_cv);

	r = pcm_unregister(dev);
	if (r)
		return r;

	mtx_destroy(&sc->vchi_lock);
	mtx_destroy(&sc->data_lock);
	cv_destroy(&sc->data_cv);

	bcm2835_audio_release(sc);

	if (sc->lock) {
		snd_mtxfree(sc->lock);
		sc->lock = NULL;
	}

	free(sc, M_DEVBUF);

	return 0;
}

static device_method_t bcm2835_audio_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	bcm2835_audio_identify),
	DEVMETHOD(device_probe,		bcm2835_audio_probe),
	DEVMETHOD(device_attach,	bcm2835_audio_attach),
	DEVMETHOD(device_detach,	bcm2835_audio_detach),
	{ 0, 0 }
};

static driver_t bcm2835_audio_driver = {
	"pcm",
	bcm2835_audio_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(bcm2835_audio, vchiq, bcm2835_audio_driver, pcm_devclass, 0, 0);
MODULE_DEPEND(bcm2835_audio, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
MODULE_DEPEND(bcm2835_audio, vchiq, 1, 1, 1);
MODULE_VERSION(bcm2835_audio, 1);

Index: projects/release-pkg/sys/arm64/arm64/locore.S
===================================================================
--- projects/release-pkg/sys/arm64/arm64/locore.S	(revision 297926)
+++ projects/release-pkg/sys/arm64/arm64/locore.S	(revision 297927)
@@ -1,684 +1,697 @@
/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.s"
#include "opt_kstack_pages.h"
#include #include #include #include #include #include #include

#define	VIRT_BITS	48
+#define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)

	.globl	kernbase
	.set	kernbase, KERNBASE

#define	DEVICE_MEM	0
#define	NORMAL_UNCACHED	1
#define	NORMAL_MEM	2

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

	.text
	.globl _start
_start:
	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in this case the code to find where we are running from
	 * would have also failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_virt_delta

	/*
	 * At this point:
	 * x29 = PA - VA
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Jump to the virtual address space */
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	/* Set up the stack */
	adr	x25, initstack_end
	mov	sp, x25
	sub	sp, sp, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

	/* Backup the module pointer */
	mov	x1, x0

	/* Make the page table base a virtual address */
	sub	x26, x26, x29
	sub	x24, x24, x29

	sub	sp, sp, #(64 * 4)
	mov	x0, sp

	/* Negate the delta so it is VA -> PA */
	neg	x29, x29

	str	x1,  [x0]	/* modulep */
	str	x26, [x0, 8]	/* kern_l1pt */
	str	x29, [x0, 16]	/* kern_delta */
	str	x25, [x0, 24]	/* kern_stack */
	str	x24, [x0, 32]	/* kern_l0pt */

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	_end
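Editor's note: a C-side sketch (not kernel source) of the five-slot parameter block that virtdone builds on the stack before calling initarm(). The field names follow the comments on the str instructions above; the kernel's real structure and types may differ, and the stack reservation (64 * 4 bytes) is larger than this struct.

#include <stdint.h>

struct arm64_bootparams_sketch {
	uint64_t modulep;	/* [x0 +  0] module/metadata pointer */
	uint64_t kern_l1pt;	/* [x0 +  8] kernel L1 table (as a VA) */
	uint64_t kern_delta;	/* [x0 + 16] VA -> PA delta (negated x29) */
	uint64_t kern_stack;	/* [x0 + 24] initial kernel stack */
	uint64_t kern_l0pt;	/* [x0 + 32] kernel L0 table (as a VA) */
};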
#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #2

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, x1

	/* Load the kernel page table */
	adr	x24, pagetable_l0_ttbr1
	/* Load the identity page table */
	adr	x27, pagetable_l0_ttbr0

	/* Enable the mmu */
	bl	start_mmu

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
	ldr	x4, =secondary_stacks
	mov	x5, #(PAGE_SIZE * KSTACK_PAGES)
	mul	x5, x0, x5
	add	sp, x4, x5

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
drop_to_el1:
	mrs	x1, CurrentEL
	lsr	x1, x1, #2
	cmp	x1, #0x2
	b.eq	1f
	ret
1:
	/* Configure the Hypervisor */
	mov	x2, #(HCR_RW)
	msr	hcr_el2, x2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/* Don't trap to EL2 for exceptions */
	mov	x2, #CPTR_RES1
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adr	x2, hyp_vectors
	msr	vbar_el2, x2

	mov	x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN	/* Enable access from insecure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the address to return to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad SCTLR_RES1

#define	VECT_EMPTY	\
	.align 7;	\
1:	b	1b

	.align 11
hyp_vectors:
	VECT_EMPTY	/* Synchronous EL2t */
	VECT_EMPTY	/* IRQ EL2t */
	VECT_EMPTY	/* FIQ EL2t */
	VECT_EMPTY	/* Error EL2t */

	VECT_EMPTY	/* Synchronous EL2h */
	VECT_EMPTY	/* IRQ EL2h */
	VECT_EMPTY	/* FIQ EL2h */
	VECT_EMPTY	/* Error EL2h */

	VECT_EMPTY	/* Synchronous 64-bit EL1 */
	VECT_EMPTY	/* IRQ 64-bit EL1 */
	VECT_EMPTY	/* FIQ 64-bit EL1 */
	VECT_EMPTY	/* Error 64-bit EL1 */

	VECT_EMPTY	/* Synchronous 32-bit EL1 */
	VECT_EMPTY	/* IRQ 32-bit EL1 */
	VECT_EMPTY	/* FIQ 32-bit EL1 */
	VECT_EMPTY	/* Error 32-bit EL1 */

/*
 * Get the delta between the physical address we were loaded to and the
 * virtual address we expect to run from. This is used when building the
 * initial page table.
 */
get_virt_delta:
	/* Load the physical address of virt_map */
	adr	x29, virt_map
	/* Load the virtual address of virt_map stored in virt_map */
	ldr	x28, [x29]
	/* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
	sub	x29, x29, x28
	/* Find the load address for the kernel */
	mov	x28, #(KERNBASE)
	add	x28, x28, x29
	ret

	.align 3
virt_map:
	.quad	virt_map

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * TODO: This is out of date.
* There are at least 5 pages before that address for the page tables * The pages used are: - * - The identity (PA = VA) table (TTBR0) - * - The Kernel L1 table (TTBR1)(not yet) - * - The PA != VA L2 table to jump into (not yet) - * - The FDT L2 table (not yet) + * - The Kernel L2 table + * - The Kernel L1 table + * - The Kernel L0 table (TTBR1) + * - The identity (PA = VA) L1 table + * - The identity (PA = VA) L0 table (TTBR0) + * - The DMAP L1 tables */ create_pagetables: /* Save the Link register */ mov x5, x30 /* Clean the page table */ adr x6, pagetable mov x26, x6 adr x27, pagetable_end 1: stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 cmp x6, x27 b.lo 1b /* * Build the TTBR1 maps. */ /* Find the size of the kernel */ mov x6, #(KERNBASE) ldr x7, .Lend /* Find the end - begin */ sub x8, x7, x6 /* Get the number of l2 pages to allocate, rounded down */ lsr x10, x8, #(L2_SHIFT) /* Add 8 MiB for any rounding above and the module data */ add x10, x10, #4 /* Create the kernel space L2 table */ mov x6, x26 mov x7, #NORMAL_MEM mov x8, #(KERNBASE & L2_BLOCK_MASK) mov x9, x28 bl build_l2_block_pagetable /* Move to the l1 table */ add x26, x26, #PAGE_SIZE /* Link the l1 -> l2 table */ mov x9, x6 mov x6, x26 bl link_l1_pagetable /* Move to the l0 table */ add x24, x26, #PAGE_SIZE /* Link the l0 -> l1 table */ mov x9, x6 mov x6, x24 mov x10, #1 bl link_l0_pagetable + /* Link the DMAP tables */ + ldr x8, =DMAP_MIN_ADDRESS + adr x9, pagetable_dmap; + mov x10, #DMAP_TABLES + bl link_l0_pagetable + /* * Build the TTBR0 maps. */ add x27, x24, #PAGE_SIZE mov x6, x27 /* The initial page table */ #if defined(SOCDEV_PA) && defined(SOCDEV_VA) /* Create a table for the UART */ mov x7, #DEVICE_MEM mov x8, #(SOCDEV_VA) /* VA start */ mov x9, #(SOCDEV_PA) /* PA start */ mov x10, #1 bl build_l1_block_pagetable #endif /* Create the VA = PA map */ mov x7, #NORMAL_UNCACHED /* Uncached as it's only needed early on */ mov x9, x27 mov x8, x9 /* VA start (== PA start) */ mov x10, #1 bl build_l1_block_pagetable /* Move to the l0 table */ add x27, x27, #PAGE_SIZE /* Link the l0 -> l1 table */ mov x9, x6 mov x6, x27 mov x10, #1 bl link_l0_pagetable /* Restore the Link register */ mov x30, x5 ret /* * Builds an L0 -> L1 table descriptor * * This is a link for a 512GiB block of memory with up to 1GiB regions mapped * within it by build_l1_block_pagetable. * * x6 = L0 table * x8 = Virtual Address * x9 = L1 PA (trashed) * x10 = Entry count * x11, x12 and x13 are trashed */ link_l0_pagetable: /* * Link an L0 -> L1 table entry. */ /* Find the table index */ lsr x11, x8, #L0_SHIFT and x11, x11, #L0_ADDR_MASK /* Build the L0 block entry */ mov x12, #L0_TABLE /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT 1: orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret /* * Builds an L1 -> L2 table descriptor * * This is a link for a 1GiB block of memory with up to 2MiB regions mapped * within it by build_l2_block_pagetable. * * x6 = L1 table * x8 = Virtual Address * x9 = L2 PA (trashed) * x11, x12 and x13 are trashed */ link_l1_pagetable: /* * Link an L1 -> L2 table entry. 
*/ /* Find the table index */ lsr x11, x8, #L1_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L1 block entry */ mov x12, #L1_TABLE /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] ret /* * Builds count 1 GiB page table entry * x6 = L1 table * x7 = Type (0 = Device, 1 = Normal) * x8 = VA start * x9 = PA start (trashed) * x10 = Entry count * x11, x12 and x13 are trashed */ build_l1_block_pagetable: /* * Build the L1 table entry. */ /* Find the table index */ lsr x11, x8, #L1_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L1 block entry */ lsl x12, x7, #2 orr x12, x12, #L1_BLOCK orr x12, x12, #(ATTR_AF) #ifdef SMP orr x12, x12, ATTR_SH(ATTR_SH_IS) #endif /* Only use the output address bits */ lsr x9, x9, #L1_SHIFT /* Set the physical address for this virtual address */ 1: orr x13, x12, x9, lsl #L1_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret /* * Builds count 2 MiB page table entry * x6 = L2 table * x7 = Type (0 = Device, 1 = Normal) * x8 = VA start * x9 = PA start (trashed) * x10 = Entry count * x11, x12 and x13 are trashed */ build_l2_block_pagetable: /* * Build the L2 table entry. */ /* Find the table index */ lsr x11, x8, #L2_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L2 block entry */ lsl x12, x7, #2 orr x12, x12, #L2_BLOCK orr x12, x12, #(ATTR_AF) #ifdef SMP orr x12, x12, ATTR_SH(ATTR_SH_IS) #endif /* Only use the output address bits */ lsr x9, x9, #L2_SHIFT /* Set the physical address for this virtual address */ 1: orr x13, x12, x9, lsl #L2_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret start_mmu: dsb sy /* Load the exception vectors */ ldr x2, =exception_vectors msr vbar_el1, x2 /* Load ttbr0 and ttbr1 */ msr ttbr0_el1, x27 msr ttbr1_el1, x24 isb /* Clear the Monitor Debug System control register */ msr mdscr_el1, xzr /* Invalidate the TLB */ tlbi vmalle1is ldr x2, mair msr mair_el1, x2 /* * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1. 
*/ ldr x2, tcr mrs x3, id_aa64mmfr0_el1 bfi x2, x3, #32, #3 msr tcr_el1, x2 /* Setup SCTLR */ ldr x2, sctlr_set ldr x3, sctlr_clear mrs x1, sctlr_el1 bic x1, x1, x3 /* Clear the required bits */ orr x1, x1, x2 /* Set the required bits */ msr sctlr_el1, x1 isb ret .align 3 mair: /* Device Normal, no cache Normal, write-back */ .quad MAIR_ATTR(0x00, 0) | MAIR_ATTR(0x44, 1) | MAIR_ATTR(0xff, 2) tcr: .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \ TCR_CACHE_ATTRS | TCR_SMP_ATTRS) sctlr_set: /* Bits to set */ .quad (SCTLR_UCI | SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \ SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | SCTLR_M) sctlr_clear: /* Bits to clear */ .quad (SCTLR_EE | SCTLR_EOE | SCTLR_WXN | SCTLR_UMA | SCTLR_ITD | \ SCTLR_THEE | SCTLR_CP15BEN | SCTLR_A) .globl abort abort: b abort //.section .init_pagetable .align 12 /* 4KiB aligned */ /* * 3 initial tables (in the following order): * L2 for kernel (High addresses) * L1 for kernel * L1 for user (Low addresses) */ pagetable: .space PAGE_SIZE pagetable_l1_ttbr1: .space PAGE_SIZE pagetable_l0_ttbr1: .space PAGE_SIZE pagetable_l1_ttbr0: .space PAGE_SIZE pagetable_l0_ttbr0: .space PAGE_SIZE + + .globl pagetable_dmap +pagetable_dmap: + .space PAGE_SIZE * DMAP_TABLES pagetable_end: el2_pagetable: .space PAGE_SIZE .globl init_pt_va init_pt_va: .quad pagetable /* XXX: Keep page tables VA */ .align 4 initstack: .space (PAGE_SIZE * KSTACK_PAGES) initstack_end: ENTRY(sigcode) mov x0, sp add x0, x0, #SF_UC 1: mov x8, #SYS_sigreturn svc 0 /* sigreturn failed, exit */ mov x8, #SYS_exit svc 0 b 1b END(sigcode) /* This may be copied to the stack, keep it 16-byte aligned */ .align 3 esigcode: .data .align 3 .global szsigcode szsigcode: .quad esigcode - sigcode Index: projects/release-pkg/sys/arm64/arm64/pmap.c =================================================================== --- projects/release-pkg/sys/arm64/arm64/pmap.c (revision 297926) +++ projects/release-pkg/sys/arm64/arm64/pmap.c (revision 297927) @@ -1,3579 +1,3583 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 2003 Peter Wemm * All rights reserved. * Copyright (c) 2005-2010 Alan L. Cox * All rights reserved. * Copyright (c) 2014 Andrew Turner * All rights reserved. * Copyright (c) 2014-2016 The FreeBSD Foundation * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 */ /*- * Copyright (c) 2003 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Jake Burkholder, * Safeport Network Services, and Network Associates Laboratories, the * Security Research Division of Network Associates, Inc. under * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA * CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. 
 */

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

/*
 * These are configured by the mair_el1 register. This is set up in locore.S
 */
#define	DEVICE_MEMORY	0
#define	UNCACHED_MEMORY	1
#define	CACHED_MEMORY	2

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

#define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)

#define	NPV_LIST_LOCKS	MAXCPU

#define	PHYS_TO_PV_LIST_LOCK(pa)	\
			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])

#define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
	struct rwlock **_lockp = (lockp);		\
	struct rwlock *_new_lock;			\
							\
	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
	if (_new_lock != *_lockp) {			\
		if (*_lockp != NULL)			\
			rw_wunlock(*_lockp);		\
		*_lockp = _new_lock;			\
		rw_wlock(*_lockp);			\
	}						\
} while (0)

#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))

#define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
	struct rwlock **_lockp = (lockp);		\
							\
	if (*_lockp != NULL) {				\
		rw_wunlock(*_lockp);			\
		*_lockp = NULL;				\
	}						\
} while (0)

#define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))

struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t kernel_vm_end = 0;

struct msgbuf *msgbufp = NULL;

static struct rwlock_padalign pvh_global_lock;

vm_paddr_t dmap_phys_base;	/* The start of the dmap region */

+/* This code assumes all L1 DMAP entries will be used */
+CTASSERT((DMAP_MIN_ADDRESS  & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
+CTASSERT((DMAP_MAX_ADDRESS  & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
+
+#define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
+extern pt_entry_t pagetable_dmap[];
+
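Editor's note: an arithmetic sketch (not kernel source) of what the two CTASSERTs and DMAP_TABLES above encode. With 4 KiB granules each L0 entry spans 512 GiB, so the direct map must start and end on an L0 boundary (masking with ~L0_OFFSET must be a no-op), and one statically allocated L1 page is needed per L0 slot the DMAP spans. The constants below are illustrative stand-ins for the arm64 pte.h values.

#include <stdint.h>
#include <assert.h>

#define L0_SHIFT	39			/* 512 GiB per L0 entry */
#define L0_SIZE		(1ULL << L0_SHIFT)

static uint64_t
dmap_tables(uint64_t dmap_min, uint64_t dmap_max)
{
	/* same alignment requirement as the two CTASSERTs */
	assert((dmap_min & (L0_SIZE - 1)) == 0);
	assert((dmap_max & (L0_SIZE - 1)) == 0);
	return ((dmap_max - dmap_min) >> L0_SHIFT);	/* == DMAP_TABLES */
}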
/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static struct mtx pv_chunks_mutex;
static struct rwlock pv_list_locks[NPV_LIST_LOCKS];

static void	free_pv_chunk(struct pv_chunk *pc);
static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
		vm_page_t m, vm_prot_t prot, vm_page_t mpte,
		struct rwlock **lockp);
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m, struct rwlock **lockp);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
		struct rwlock **lockp);

static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);
static int pmap_unuse_l3(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	pmap_load_store(table, entry) atomic_swap_64(table, entry)
#define	pmap_set(table, mask) atomic_set_64(table, mask)
#define	pmap_load_clear(table) atomic_swap_64(table, 0)
#define	pmap_load(table) (*table)

/********************/
/* Inline functions */
/********************/

static __inline void
pagecopy(void *s, void *d)
{

	memcpy(d, s, PAGE_SIZE);
}

#define	pmap_l0_index(va)	(((va) >> L0_SHIFT) & L0_ADDR_MASK)
#define	pmap_l1_index(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define	pmap_l2_index(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define	pmap_l3_index(va)	(((va) >> L3_SHIFT) & Ln_ADDR_MASK)

static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[pmap_l0_index(va)]);
}

static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
	return (&l1[pmap_l1_index(va)]);
}

static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = pmap_l0(pmap, va);
	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
		return (NULL);

	return (pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
{
	pd_entry_t *l2;

	l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
	return (&l2[pmap_l2_index(va)]);
}

static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
{
	pt_entry_t *l3;

	l3 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
	return (&l3[pmap_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = pmap_l0(pmap, va);
	desc = pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = pmap_l0_to_l1(l0, va);
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = pmap_l2_to_l3(l2, va);
	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
		return (NULL);

	return (l3);
}

bool
pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
    pd_entry_t **l2, pt_entry_t **l3)
{
	pd_entry_t *l0p, *l1p, *l2p;

	if (pmap->pm_l0 == NULL)
		return (false);

	l0p = pmap_l0(pmap, va);
	*l0 = l0p;

	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
		return (false);

	l1p = pmap_l0_to_l1(l0p, va);
	*l1 = l1p;

	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
		*l2 = NULL;
		*l3 = NULL;
		return (true);
	}

	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
		return (false);

	l2p = pmap_l1_to_l2(l1p, va);
	*l2 = l2p;

	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
		*l3 = NULL;
		return (true);
	}

	*l3 = pmap_l2_to_l3(l2p, va);

	return (true);
}

static __inline int
pmap_is_current(pmap_t pmap)
{

	return ((pmap == pmap_kernel()) ||
	    (pmap == curthread->td_proc->p_vmspace->vm_map.pmap));
}

static __inline int
pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
}

static __inline int
pmap_l3_valid_cacheable(pt_entry_t l3)
{

	return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) &&
	    ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
}

#define	PTE_SYNC(pte)	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte))

/*
 * Checks if the page is dirty. We currently lack proper tracking of this on
 * arm64, so for now assume that a page mapped read/write which has been
 * accessed is dirty.
 */
*/ static inline int pmap_page_dirty(pt_entry_t pte) { return ((pte & (ATTR_AF | ATTR_AP_RW_BIT)) == (ATTR_AF | ATTR_AP(ATTR_AP_RW))); } static __inline void pmap_resident_count_inc(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); pmap->pm_stats.resident_count += count; } static __inline void pmap_resident_count_dec(pmap_t pmap, int count) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT(pmap->pm_stats.resident_count >= count, ("pmap %p resident count underflow %ld %d", pmap, pmap->pm_stats.resident_count, count)); pmap->pm_stats.resident_count -= count; } static pt_entry_t * pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot, u_int *l2_slot) { pt_entry_t *l2; pd_entry_t *l1; l1 = (pd_entry_t *)l1pt; *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK; /* Check locore has used a table L1 map */ KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE, ("Invalid bootstrap L1 table")); /* Find the address of the L2 table */ l2 = (pt_entry_t *)init_pt_va; *l2_slot = pmap_l2_index(va); return (l2); } static vm_paddr_t pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va) { u_int l1_slot, l2_slot; pt_entry_t *l2; l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot); return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET)); } static void -pmap_bootstrap_dmap(vm_offset_t l1pt, vm_paddr_t kernstart) +pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t kernstart) { vm_offset_t va; vm_paddr_t pa; - pd_entry_t *l1; u_int l1_slot; pa = dmap_phys_base = kernstart & ~L1_OFFSET; va = DMAP_MIN_ADDRESS; - l1 = (pd_entry_t *)l1pt; - l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS); - for (; va < DMAP_MAX_ADDRESS; pa += L1_SIZE, va += L1_SIZE, l1_slot++) { - KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); + for (; va < DMAP_MAX_ADDRESS; pa += L1_SIZE, va += L1_SIZE) { + l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT); - pmap_load_store(&l1[l1_slot], + pmap_load_store(&pagetable_dmap[l1_slot], (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_IDX(CACHED_MEMORY) | L1_BLOCK); } - cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE); + cpu_dcache_wb_range((vm_offset_t)pagetable_dmap, + PAGE_SIZE * DMAP_TABLES); cpu_tlb_flushID(); } static vm_offset_t pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start) { vm_offset_t l2pt; vm_paddr_t pa; pd_entry_t *l1; u_int l1_slot; KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address")); l1 = (pd_entry_t *)l1pt; l1_slot = pmap_l1_index(va); l2pt = l2_start; for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) { KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); pa = pmap_early_vtophys(l1pt, l2pt); pmap_load_store(&l1[l1_slot], (pa & ~Ln_TABLE_MASK) | L1_TABLE); l2pt += PAGE_SIZE; } /* Clean the L2 page table */ memset((void *)l2_start, 0, l2pt - l2_start); cpu_dcache_wb_range(l2_start, l2pt - l2_start); /* Flush the l1 table to ram */ cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE); return l2pt; } static vm_offset_t pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start) { vm_offset_t l2pt, l3pt; vm_paddr_t pa; pd_entry_t *l2; u_int l2_slot; KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address")); l2 = pmap_l2(kernel_pmap, va); l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1)); l2pt = (vm_offset_t)l2; l2_slot = pmap_l2_index(va); l3pt = l3_start; for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) { KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); pa = pmap_early_vtophys(l1pt, l3pt); pmap_load_store(&l2[l2_slot], (pa & ~Ln_TABLE_MASK) | L2_TABLE); l3pt += PAGE_SIZE; } /* Clean the L3 page table */ memset((void *)l3_start, 0, l3pt - l3_start); cpu_dcache_wb_range(l3_start,
l3pt - l3_start); cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE); return l3pt; } /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) { u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot; uint64_t kern_delta; pt_entry_t *l2; vm_offset_t va, freemempos; vm_offset_t dpcpu, msgbufpv; vm_paddr_t pa, min_pa; int i; kern_delta = KERNBASE - kernstart; physmem = 0; printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen); printf("%lx\n", l1pt); printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK); /* Set this early so we can use the pagetable walking functions */ kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt; PMAP_LOCK_INIT(kernel_pmap); /* * Initialize the global pv list lock. */ rw_init(&pvh_global_lock, "pmap pv global"); /* Assume the address we were loaded to is a valid physical address */ min_pa = KERNBASE - kern_delta; /* * Find the minimum physical address. physmap is sorted, * but may contain empty ranges. */ for (i = 0; i < (physmap_idx * 2); i += 2) { if (physmap[i] == physmap[i + 1]) continue; if (physmap[i] <= min_pa) min_pa = physmap[i]; break; } /* Create a direct map region early so we can use it for pa -> va */ pmap_bootstrap_dmap(l1pt, min_pa); va = KERNBASE; pa = KERNBASE - kern_delta; /* * Start to initialise phys_avail by copying from physmap * up to the physical address KERNBASE points at. */ map_slot = avail_slot = 0; for (; map_slot < (physmap_idx * 2) && avail_slot < (PHYS_AVAIL_SIZE - 2); map_slot += 2) { if (physmap[map_slot] == physmap[map_slot + 1]) continue; if (physmap[map_slot] <= pa && physmap[map_slot + 1] > pa) break; phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = physmap[map_slot + 1]; physmem += (phys_avail[avail_slot + 1] - phys_avail[avail_slot]) >> PAGE_SHIFT; avail_slot += 2; } /* Add the memory before the kernel */ if (physmap[avail_slot] < pa && avail_slot < (PHYS_AVAIL_SIZE - 2)) { phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = pa; physmem += (phys_avail[avail_slot + 1] - phys_avail[avail_slot]) >> PAGE_SHIFT; avail_slot += 2; } used_map_slot = map_slot; /* * Read the page table to find out what is already mapped. * This assumes we have mapped a block of memory from KERNBASE * using a single L1 entry. */ l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot); /* Sanity check the index, KERNBASE should be the first VA */ KASSERT(l2_slot == 0, ("The L2 index is non-zero")); /* Find how many pages we have mapped */ for (; l2_slot < Ln_ENTRIES; l2_slot++) { if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0) break; /* Check locore used L2 blocks */ KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK, ("Invalid bootstrap L2 table")); KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa, ("Incorrect PA in L2 table")); va += L2_SIZE; pa += L2_SIZE; } va = roundup2(va, L1_SIZE); freemempos = KERNBASE + kernlen; freemempos = roundup2(freemempos, PAGE_SIZE); /* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */ freemempos = pmap_bootstrap_l2(l1pt, va, freemempos); /* And the l3 tables for the early devmap */ freemempos = pmap_bootstrap_l3(l1pt, VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos); cpu_tlb_flushID(); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); /* Allocate dynamic per-cpu area. */ alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate memory for the msgbuf, e.g. 
for /sbin/dmesg */ alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); msgbufp = (void *)msgbufpv; virtual_avail = roundup2(freemempos, L1_SIZE); virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE; kernel_vm_end = virtual_avail; pa = pmap_early_vtophys(l1pt, freemempos); /* Finish initialising physmap */ map_slot = used_map_slot; for (; avail_slot < (PHYS_AVAIL_SIZE - 2) && map_slot < (physmap_idx * 2); map_slot += 2) { if (physmap[map_slot] == physmap[map_slot + 1]) continue; /* Have we used the current range? */ if (physmap[map_slot + 1] <= pa) continue; /* Do we need to split the entry? */ if (physmap[map_slot] < pa) { phys_avail[avail_slot] = pa; phys_avail[avail_slot + 1] = physmap[map_slot + 1]; } else { phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = physmap[map_slot + 1]; } physmem += (phys_avail[avail_slot + 1] - phys_avail[avail_slot]) >> PAGE_SHIFT; avail_slot += 2; } phys_avail[avail_slot] = 0; phys_avail[avail_slot + 1] = 0; /* * Maxmem isn't the "maximum memory", it's one larger than the * highest page of the physical address space. It should be * called something like "Maxphyspage". */ Maxmem = atop(phys_avail[avail_slot - 1]); cpu_tlb_flushID(); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_memattr = VM_MEMATTR_WRITE_BACK; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { int i; /* * Initialize the pv chunk list mutex. */ mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); /* * Initialize the pool of pv list locks. */ for (i = 0; i < NPV_LIST_LOCKS; i++) rw_init(&pv_list_locks[i], "pmap pv list"); } /* * Normal, non-SMP, invalidation functions. * We inline these within pmap.c for speed. */ PMAP_INLINE void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { sched_pin(); __asm __volatile( "dsb sy \n" "tlbi vaae1is, %0 \n" "dsb sy \n" "isb \n" : : "r"(va >> PAGE_SHIFT)); sched_unpin(); } PMAP_INLINE void pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t addr; sched_pin(); __asm __volatile("dsb sy"); for (addr = sva; addr < eva; addr += PAGE_SIZE) { __asm __volatile( "tlbi vaae1is, %0" : : "r"(addr >> PAGE_SHIFT)); } __asm __volatile( "dsb sy \n" "isb \n"); sched_unpin(); } PMAP_INLINE void pmap_invalidate_all(pmap_t pmap) { sched_pin(); __asm __volatile( "dsb sy \n" "tlbi vmalle1is \n" "dsb sy \n" "isb \n"); sched_unpin(); } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { pt_entry_t *pte, tpte; vm_paddr_t pa; int lvl; pa = 0; PMAP_LOCK(pmap); /* * Find the block or page map for this virtual address. pmap_pte * will return either a valid block/page entry, or NULL. 
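 * The physical address is then the entry's output address bits combined
 * with the level-dependent low bits of va; a sketch follows (editor's
 * illustration, not part of the original change).
 */

#if 0
	/*
	 * Editor's sketch: the address assembly performed by the switch
	 * below, spelled out for each level.
	 */
	pa = tpte & ~ATTR_MASK;			/* output address bits */
	if (lvl == 1)
		pa |= (va & L1_OFFSET);		/* low 30 bits of va */
	else if (lvl == 2)
		pa |= (va & L2_OFFSET);		/* low 21 bits of va */
	else
		pa |= (va & L3_OFFSET);		/* low 12 bits of va */
#endif

/*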
*/ pte = pmap_pte(pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); pa = tpte & ~ATTR_MASK; switch(lvl) { case 1: KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, ("pmap_extract: Invalid L1 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L1_OFFSET); break; case 2: KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_extract: Invalid L2 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L2_OFFSET); break; case 3: KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("pmap_extract: Invalid L3 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L3_OFFSET); break; } } PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pt_entry_t *pte, tpte; vm_paddr_t pa; vm_page_t m; int lvl; pa = 0; m = NULL; PMAP_LOCK(pmap); retry: pte = pmap_pte(pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); KASSERT(lvl > 0 && lvl <= 3, ("pmap_extract_and_hold: Invalid level %d", lvl)); CTASSERT(L1_BLOCK == L2_BLOCK); KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) || (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK), ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl, tpte & ATTR_DESCR_MASK)); if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) || ((prot & VM_PROT_WRITE) == 0)) { if (vm_page_pa_tryrelock(pmap, tpte & ~ATTR_MASK, &pa)) goto retry; m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK); vm_page_hold(m); } } PA_UNLOCK_COND(pa); PMAP_UNLOCK(pmap); return (m); } vm_paddr_t pmap_kextract(vm_offset_t va) { pt_entry_t *pte, tpte; vm_paddr_t pa; int lvl; if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { pa = DMAP_TO_PHYS(va); } else { pa = 0; pte = pmap_pte(kernel_pmap, va, &lvl); if (pte != NULL) { tpte = pmap_load(pte); pa = tpte & ~ATTR_MASK; switch(lvl) { case 1: KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK, ("pmap_kextract: Invalid L1 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L1_OFFSET); break; case 2: KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK, ("pmap_kextract: Invalid L2 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L2_OFFSET); break; case 3: KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE, ("pmap_kextract: Invalid L3 pte found: %lx", tpte & ATTR_DESCR_MASK)); pa |= (va & L3_OFFSET); break; } } } return (pa); } /*************************************************** * Low level mapping routines..... ***************************************************/ void pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa) { pd_entry_t *pde; pt_entry_t *pte; vm_offset_t va; int lvl; KASSERT((pa & L3_OFFSET) == 0, ("pmap_kenter_device: Invalid physical address")); KASSERT((sva & L3_OFFSET) == 0, ("pmap_kenter_device: Invalid virtual address")); KASSERT((size & PAGE_MASK) == 0, ("pmap_kenter_device: Mapping is not page-sized")); va = sva; while (size != 0) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_kenter_device: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_kenter_device: Invalid level %d", lvl)); pte = pmap_l2_to_l3(pde, va); pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT | ATTR_IDX(DEVICE_MEMORY) | L3_PAGE); PTE_SYNC(pte); va += PAGE_SIZE; pa += PAGE_SIZE; size -= PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /* * Remove a page from the kernel pagetables. 
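 * A typical pairing of the device-mapping primitives follows (editor's
 * sketch, not part of the original change; va and pa are caller-supplied
 * and must be page-aligned, as the KASSERTs in these functions demand).
 */

#if 0
/*
 * Editor's sketch: map one 4KiB device page with pmap_kenter_device()
 * (above), use it, then tear it down with pmap_kremove_device() (below).
 */
static void
sketch_with_device_window(vm_offset_t va, vm_paddr_t pa)
{
	pmap_kenter_device(va, PAGE_SIZE, pa);
	/* ... access the device registers through va ... */
	pmap_kremove_device(va, PAGE_SIZE);
}
#endif

/*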
*/ PMAP_INLINE void pmap_kremove(vm_offset_t va) { pt_entry_t *pte; int lvl; pte = pmap_pte(kernel_pmap, va, &lvl); KASSERT(pte != NULL, ("pmap_kremove: Invalid address")); KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl)); if (pmap_l3_valid_cacheable(pmap_load(pte))) cpu_dcache_wb_range(va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); pmap_invalidate_page(kernel_pmap, va); } void pmap_kremove_device(vm_offset_t sva, vm_size_t size) { pt_entry_t *pte; vm_offset_t va; int lvl; KASSERT((sva & L3_OFFSET) == 0, ("pmap_kremove_device: Invalid virtual address")); KASSERT((size & PAGE_MASK) == 0, ("pmap_kremove_device: Mapping is not page-sized")); va = sva; while (size != 0) { pte = pmap_pte(kernel_pmap, va, &lvl); KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va)); KASSERT(lvl == 3, ("Invalid device pagetable level: %d != 3", lvl)); pmap_load_clear(pte); PTE_SYNC(pte); va += PAGE_SIZE; size -= PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) { return PHYS_TO_DMAP(start); } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. * Note: SMP coherent. Uses a ranged shootdown IPI. */ void pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) { pd_entry_t *pde; pt_entry_t *pte, pa; vm_offset_t va; vm_page_t m; int i, lvl; va = sva; for (i = 0; i < count; i++) { pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_qenter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_qenter: Invalid level %d", lvl)); m = ma[i]; pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) | ATTR_IDX(m->md.pv_memattr) | L3_PAGE; pte = pmap_l2_to_l3(pde, va); pmap_load_store(pte, pa); PTE_SYNC(pte); va += L3_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /* * This routine tears out page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(vm_offset_t sva, int count) { pt_entry_t *pte; vm_offset_t va; int lvl; KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva)); va = sva; while (count-- > 0) { pte = pmap_pte(kernel_pmap, va, &lvl); KASSERT(lvl == 3, ("Invalid device pagetable level: %d != 3", lvl)); if (pte != NULL) { if (pmap_l3_valid_cacheable(pmap_load(pte))) cpu_dcache_wb_range(va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); } va += PAGE_SIZE; } pmap_invalidate_range(kernel_pmap, sva, va); } /*************************************************** * Page table page management routines..... ***************************************************/ static __inline void pmap_free_zero_pages(struct spglist *free) { vm_page_t m; while ((m = SLIST_FIRST(free)) != NULL) { SLIST_REMOVE_HEAD(free, plinks.s.ss); /* Preserve the page's PG_ZERO setting. */ vm_page_free_toq(m); } } /* * Schedule the specified unused page table page to be freed. 
Specifically, * add the page to the specified list of pages that will be released to the * physical memory manager after the TLB has been updated. */ static __inline void pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, boolean_t set_PG_ZERO) { if (set_PG_ZERO) m->flags |= PG_ZERO; else m->flags &= ~PG_ZERO; SLIST_INSERT_HEAD(free, m, plinks.s.ss); } /* * Decrements a page table page's wire count, which is used to record the * number of valid page table entries within the page. If the wire count * drops to zero, then the page table page is unmapped. Returns TRUE if the * page table page was unmapped and FALSE otherwise. */ static inline boolean_t pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->wire_count; if (m->wire_count == 0) { _pmap_unwire_l3(pmap, va, m, free); return (TRUE); } else return (FALSE); } static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * unmap the page table page */ if (m->pindex >= (NUL2E + NUL1E)) { /* l1 page */ pd_entry_t *l0; l0 = pmap_l0(pmap, va); pmap_load_clear(l0); PTE_SYNC(l0); } else if (m->pindex >= NUL2E) { /* l2 page */ pd_entry_t *l1; l1 = pmap_l1(pmap, va); pmap_load_clear(l1); PTE_SYNC(l1); } else { /* l3 page */ pd_entry_t *l2; l2 = pmap_l2(pmap, va); pmap_load_clear(l2); PTE_SYNC(l2); } pmap_resident_count_dec(pmap, 1); if (m->pindex < NUL2E) { /* We just released an l3, unhold the matching l2 */ pd_entry_t *l1, tl1; vm_page_t l2pg; l1 = pmap_l1(pmap, va); tl1 = pmap_load(l1); l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK); pmap_unwire_l3(pmap, va, l2pg, free); } else if (m->pindex < (NUL2E + NUL1E)) { /* We just released an l2, unhold the matching l1 */ pd_entry_t *l0, tl0; vm_page_t l1pg; l0 = pmap_l0(pmap, va); tl0 = pmap_load(l0); l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); pmap_unwire_l3(pmap, va, l1pg, free); } pmap_invalidate_page(pmap, va); /* * This is a release store so that the ordinary store unmapping * the page table page is globally performed before TLB shoot- * down is begun. */ atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1); /* * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ pmap_add_delayed_free_list(m, free, TRUE); } /* * After removing an l3 entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ static int pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, struct spglist *free) { vm_page_t mpte; if (va >= VM_MAXUSER_ADDRESS) return (0); KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK); return (pmap_unwire_l3(pmap, va, mpte, free)); } void pmap_pinit0(pmap_t pmap) { PMAP_LOCK_INIT(pmap); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); pmap->pm_l0 = kernel_pmap->pm_l0; } int pmap_pinit(pmap_t pmap) { vm_paddr_t l0phys; vm_page_t l0pt; /* * allocate the l0 page */ while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) VM_WAIT; l0phys = VM_PAGE_TO_PHYS(l0pt); pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys); if ((l0pt->flags & PG_ZERO) == 0) pagezero(pmap->pm_l0); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); return (1); } /* * This routine is called if the desired page table page does not exist. * * If page table page allocation fails, this routine may sleep before * returning NULL. It sleeps only if a lock pointer was given. 
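 * A NULL return therefore means "retry": the locks were dropped while
 * sleeping, so the caller must re-walk the page table (editor's sketch of
 * that convention below, not part of the original change).
 */

#if 0
	/*
	 * Editor's sketch: the retry convention; compare pmap_alloc_l3()
	 * further below.  This is a fragment, not a complete function.
	 */
	for (;;) {
		m = _pmap_alloc_l3(pmap, ptepindex, &lock);
		if (m != NULL)
			break;
		/* Locks were dropped; the walk must start over. */
	}
#endif

/*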
* * Note: If a page allocation fails at page table level two or three, * one or two pages may be held during the wait, only to be released * afterwards. This conservative approach is easily argued to avoid * race conditions. */ static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) { vm_page_t m, l1pg, l2pg; PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * Allocate a page table page. */ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (lockp != NULL) { RELEASE_PV_LIST_LOCK(lockp); PMAP_UNLOCK(pmap); rw_runlock(&pvh_global_lock); VM_WAIT; rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); } /* * Indicate the need to retry. While waiting, the page table * page may have been allocated. */ return (NULL); } if ((m->flags & PG_ZERO) == 0) pmap_zero_page(m); /* * Map the pagetable page into the process address space, if * it isn't already there. */ if (ptepindex >= (NUL2E + NUL1E)) { pd_entry_t *l0; vm_pindex_t l0index; l0index = ptepindex - (NUL2E + NUL1E); l0 = &pmap->pm_l0[l0index]; pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE); PTE_SYNC(l0); } else if (ptepindex >= NUL2E) { vm_pindex_t l0index, l1index; pd_entry_t *l0, *l1; pd_entry_t tl0; l1index = ptepindex - NUL2E; l0index = l1index >> L0_ENTRIES_SHIFT; l0 = &pmap->pm_l0[l0index]; tl0 = pmap_load(l0); if (tl0 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index, lockp) == NULL) { --m->wire_count; /* XXX: release mem barrier? */ atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } } else { l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK); l1pg->wire_count++; } l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK); l1 = &l1[ptepindex & Ln_ADDR_MASK]; pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE); PTE_SYNC(l1); } else { vm_pindex_t l0index, l1index; pd_entry_t *l0, *l1, *l2; pd_entry_t tl0, tl1; l1index = ptepindex >> Ln_ENTRIES_SHIFT; l0index = l1index >> L0_ENTRIES_SHIFT; l0 = &pmap->pm_l0[l0index]; tl0 = pmap_load(l0); if (tl0 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + l1index, lockp) == NULL) { --m->wire_count; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } tl0 = pmap_load(l0); l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK); l1 = &l1[l1index & Ln_ADDR_MASK]; } else { l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK); l1 = &l1[l1index & Ln_ADDR_MASK]; tl1 = pmap_load(l1); if (tl1 == 0) { /* recurse for allocating page dir */ if (_pmap_alloc_l3(pmap, NUL2E + l1index, lockp) == NULL) { --m->wire_count; /* XXX: release mem barrier? */ atomic_subtract_int( &vm_cnt.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } } else { l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK); l2pg->wire_count++; } } l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK); l2 = &l2[ptepindex & Ln_ADDR_MASK]; pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE); PTE_SYNC(l2); } pmap_resident_count_inc(pmap, 1); return (m); } static vm_page_t pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) { vm_pindex_t ptepindex; pd_entry_t *pde, tpde; vm_page_t m; int lvl; /* * Calculate pagetable page index */ ptepindex = pmap_l2_pindex(va); retry: /* * Get the page directory entry */ pde = pmap_pde(pmap, va, &lvl); /* * If the page table page is mapped, we just increment the hold count, * and activate it. If we get a level 2 pde it will point to a level 3 * table. 
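 * The pindex passed to _pmap_alloc_l3() encodes which kind of page table
 * page is wanted; a summary follows (editor's sketch, not part of the
 * original change, derived from the comparisons in _pmap_alloc_l3()
 * above).
 */

#if 0
/*
 * Editor's sketch: the page table page index ranges.
 *
 *	[0, NUL2E)		L3 table pages, linked into an L2 entry
 *	[NUL2E, NUL2E + NUL1E)	L2 table pages, linked into an L1 entry
 *	[NUL2E + NUL1E, ...)	L1 table pages, linked into an L0 entry
 */
static int
sketch_ptp_level(vm_pindex_t ptepindex)
{
	if (ptepindex >= NUL2E + NUL1E)
		return (1);		/* the page holds an L1 table */
	if (ptepindex >= NUL2E)
		return (2);		/* the page holds an L2 table */
	return (3);			/* the page holds an L3 table */
}
#endif

/*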
*/ if (lvl == 2) { tpde = pmap_load(pde); if (tpde != 0) { m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK); m->wire_count++; return (m); } } /* * Here if the pte page isn't mapped, or if it has been deallocated. */ m = _pmap_alloc_l3(pmap, ptepindex, lockp); if (m == NULL && lockp != NULL) goto retry; return (m); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { vm_page_t m; KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0)); m->wire_count--; atomic_subtract_int(&vm_cnt.v_wire_count, 1); vm_page_free_zero(m); } #if 0 static int kvm_size(SYSCTL_HANDLER_ARGS) { unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; return sysctl_handle_long(oidp, &ksize, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_size, "LU", "Size of KVM"); static int kvm_free(SYSCTL_HANDLER_ARGS) { unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; return sysctl_handle_long(oidp, &kfree, 0, req); } SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 0, 0, kvm_free, "LU", "Amount of KVM free"); #endif /* 0 */ /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { vm_paddr_t paddr; vm_page_t nkpg; pd_entry_t *l0, *l1, *l2; mtx_assert(&kernel_map->system_mtx, MA_OWNED); addr = roundup2(addr, L2_SIZE); if (addr - 1 >= kernel_map->max_offset) addr = kernel_map->max_offset; while (kernel_vm_end < addr) { l0 = pmap_l0(kernel_pmap, kernel_vm_end); KASSERT(pmap_load(l0) != 0, ("pmap_growkernel: No level 0 kernel entry")); l1 = pmap_l0_to_l1(l0, kernel_vm_end); if (pmap_load(l1) == 0) { /* We need a new PDP entry */ nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); pmap_load_store(l1, paddr | L1_TABLE); PTE_SYNC(l1); continue; /* try again */ } l2 = pmap_l1_to_l2(l1, kernel_vm_end); if ((pmap_load(l2) & ATTR_AF) != 0) { kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } continue; } nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) panic("pmap_growkernel: no memory to grow kernel"); if ((nkpg->flags & PG_ZERO) == 0) pmap_zero_page(nkpg); paddr = VM_PAGE_TO_PHYS(nkpg); pmap_load_store(l2, paddr | L2_TABLE); PTE_SYNC(l2); pmap_invalidate_page(kernel_pmap, kernel_vm_end); kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; if (kernel_vm_end - 1 >= kernel_map->max_offset) { kernel_vm_end = kernel_map->max_offset; break; } } } /*************************************************** * page management routines. 
***************************************************/ CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); CTASSERT(_NPCM == 3); CTASSERT(_NPCPV == 168); static __inline struct pv_chunk * pv_to_chunk(pv_entry_t pv) { return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); } #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) #define PC_FREE0 0xfffffffffffffffful #define PC_FREE1 0xfffffffffffffffful #define PC_FREE2 0x000000fffffffffful static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 }; #if 0 #ifdef PV_STATS static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, "Current number of pv entry chunks"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, "Current number of pv entry chunks allocated"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, "Current number of pv entry chunks frees"); SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, "Number of times tried to get a chunk page but failed."); static long pv_entry_frees, pv_entry_allocs, pv_entry_count; static int pv_entry_spare; SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, "Current number of pv entry frees"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, "Current number of pv entry allocs"); SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, "Current number of pv entries"); SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, "Current number of spare pv entries"); #endif #endif /* 0 */ /* * We are in a serious low memory condition. Resort to * drastic measures to free some pages so we can allocate * another pv entry chunk. * * Returns NULL if PV entries were reclaimed from the specified pmap. * * We do not, however, unmap 2mpages because subsequent accesses will * allocate per-page pv entries until repromotion occurs, thereby * exacerbating the shortage of free pv entries. */ static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) { panic("ARM64TODO: reclaim_pv_chunk"); } /* * free the pv_entry back to the free list */ static void free_pv_entry(pmap_t pmap, pv_entry_t pv) { struct pv_chunk *pc; int idx, field, bit; rw_assert(&pvh_global_lock, RA_LOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_frees, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, 1)); PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); pc = pv_to_chunk(pv); idx = pv - &pc->pc_pventry[0]; field = idx / 64; bit = idx % 64; pc->pc_map[field] |= 1ul << bit; if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 || pc->pc_map[2] != PC_FREE2) { /* 98% of the time, pc is already at the head of the list. 
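 * (The PC_FREE masks above encode the chunk capacity; see the sketch
 * below, an editor's illustration that is not part of the original
 * change.)
 */

#if 0
	/*
	 * Editor's sketch: where PC_FREE0..PC_FREE2 come from.  A chunk
	 * holds _NPCPV == 168 pv entries tracked by three 64-bit words;
	 * 168 = 64 + 64 + 40, so the last word only ever uses its low
	 * 40 bits: (1ul << 40) - 1 == 0x000000fffffffffful.
	 */
	CTASSERT(_NPCPV == 2 * 64 + 40);
	CTASSERT(PC_FREE2 == (1ul << (_NPCPV % 64)) - 1);
#endif

/*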
*/ if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); } return; } TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } static void free_pv_chunk(struct pv_chunk *pc) { vm_page_t m; mtx_lock(&pv_chunks_mutex); TAILQ_REMOVE(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m->phys_addr); vm_page_unwire(m, PQ_NONE); vm_page_free(m); } /* * Returns a new PV entry, allocating a new PV chunk from the system when * needed. If this PV chunk allocation fails and a PV list lock pointer was * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is * returned. * * The given PV list lock may be released. */ static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp) { int bit, field; pv_entry_t pv; struct pv_chunk *pc; vm_page_t m; rw_assert(&pvh_global_lock, RA_LOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); retry: pc = TAILQ_FIRST(&pmap->pm_pvchunk); if (pc != NULL) { for (field = 0; field < _NPCM; field++) { if (pc->pc_map[field]) { bit = ffsl(pc->pc_map[field]) - 1; break; } } if (field < _NPCM) { pv = &pc->pc_pventry[field * 64 + bit]; pc->pc_map[field] &= ~(1ul << bit); /* If this was the last item, move it to tail */ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); } PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); return (pv); } } /* No free items, allocate another chunk */ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (m == NULL) { if (lockp == NULL) { PV_STAT(pc_chunk_tryfail++); return (NULL); } m = reclaim_pv_chunk(pmap, lockp); if (m == NULL) goto retry; } PV_STAT(atomic_add_int(&pc_chunk_count, 1)); PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); dump_add_page(m->phys_addr); pc = (void *)PHYS_TO_DMAP(m->phys_addr); pc->pc_pmap = pmap; pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ pc->pc_map[1] = PC_FREE1; pc->pc_map[2] = PC_FREE2; mtx_lock(&pv_chunks_mutex); TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); mtx_unlock(&pv_chunks_mutex); pv = &pc->pc_pventry[0]; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(atomic_add_long(&pv_entry_count, 1)); PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); return (pv); } /* * First find and then remove the pv entry for the specified pmap and virtual * address from the specified pv list. Returns the pv entry if found and NULL * otherwise. This operation can be performed on pv lists for either 4KB or * 2MB page mappings. */ static __inline pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; rw_assert(&pvh_global_lock, RA_LOCKED); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (pmap == PV_PMAP(pv) && va == pv->pv_va) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); pvh->pv_gen++; break; } } return (pv); } /* * First find and then destroy the pv entry for the specified pmap and virtual * address. This operation can be performed on pv lists for either 4KB or 2MB * page mappings. 
*/ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) { pv_entry_t pv; pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); free_pv_entry(pmap, pv); } /* * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { pv_entry_t pv; rw_assert(&pvh_global_lock, RA_LOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* Pass NULL instead of the lock pointer to disable reclamation. */ if ((pv = get_pv_entry(pmap, NULL)) != NULL) { pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; return (TRUE); } else return (FALSE); } /* * pmap_remove_l3: do the things to unmap a page in a process */ static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) { pt_entry_t old_l3; vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3))) cpu_dcache_wb_range(va, L3_SIZE); old_l3 = pmap_load_clear(l3); PTE_SYNC(l3); pmap_invalidate_page(pmap, va); if (old_l3 & ATTR_SW_WIRED) pmap->pm_stats.wired_count -= 1; pmap_resident_count_dec(pmap, 1); if (old_l3 & ATTR_SW_MANAGED) { m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK); if (pmap_page_dirty(old_l3)) vm_page_dirty(m); if (old_l3 & ATTR_AF) vm_page_aflag_set(m, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); pmap_pvh_free(&m->md, pmap, va); } return (pmap_unuse_l3(pmap, va, l2e, free)); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { struct rwlock *lock; vm_offset_t va, va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t l3_paddr, *l3; struct spglist free; int anyvalid; /* * Perform an unsynchronized read. This is, however, safe. */ if (pmap->pm_stats.resident_count == 0) return; anyvalid = 0; SLIST_INIT(&free); rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); lock = NULL; for (; sva < eva; sva = va_next) { if (pmap->pm_stats.resident_count == 0) break; l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) { va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; continue; } /* * Calculate index for next page table. */ va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (l2 == NULL) continue; l3_paddr = pmap_load(l2); /* * Weed out invalid mappings. */ if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE) continue; /* * Limit our scan to either the end of the va represented * by the current page table page, or to the end of the * range being removed. 
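 * The range-stepping pattern used here (and again in pmap_protect() and
 * pmap_unwire() below) is sketched in the fragment that follows
 * (editor's illustration, not part of the original change).
 */

#if 0
	/*
	 * Editor's sketch: round sva up to the next L2 boundary (as done
	 * just above), guard against wrap-around at the top of the
	 * address space, and clamp to the caller's range (as done just
	 * below).
	 */
	va_next = (sva + L2_SIZE) & ~L2_OFFSET;
	if (va_next < sva)		/* the addition overflowed */
		va_next = eva;
	if (va_next > eva)		/* clamp to the requested range */
		va_next = eva;
#endif

/*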
*/ if (va_next > eva) va_next = eva; va = va_next; for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, sva += L3_SIZE) { if (l3 == NULL) panic("l3 == NULL"); if (pmap_load(l3) == 0) { if (va != va_next) { pmap_invalidate_range(pmap, va, sva); va = va_next; } continue; } if (va == va_next) va = sva; if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free, &lock)) { sva += L3_SIZE; break; } } if (va != va_next) pmap_invalidate_range(pmap, va, sva); } if (lock != NULL) rw_wunlock(lock); if (anyvalid) pmap_invalidate_all(pmap); rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); pmap_free_zero_pages(&free); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { pv_entry_t pv; pmap_t pmap; pd_entry_t *pde, tpde; pt_entry_t *pte, tpte; struct spglist free; int lvl; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_remove_all: page %p is not managed", m)); SLIST_INIT(&free); rw_wlock(&pvh_global_lock); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pmap = PV_PMAP(pv); PMAP_LOCK(pmap); pmap_resident_count_dec(pmap, 1); pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("pmap_remove_all: no page directory entry found")); KASSERT(lvl == 2, ("pmap_remove_all: invalid pde level %d", lvl)); tpde = pmap_load(pde); pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load(pte); if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(tpte)) cpu_dcache_wb_range(pv->pv_va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); pmap_invalidate_page(pmap, pv->pv_va); if (tpte & ATTR_SW_WIRED) pmap->pm_stats.wired_count--; if ((tpte & ATTR_AF) != 0) vm_page_aflag_set(m, PGA_REFERENCED); /* * Update the vm_page_t clean and reference bits. */ if (pmap_page_dirty(tpte)) vm_page_dirty(m); pmap_unuse_l3(pmap, pv->pv_va, tpde, &free); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; free_pv_entry(pmap, pv); PMAP_UNLOCK(pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); rw_wunlock(&pvh_global_lock); pmap_free_zero_pages(&free); } /* * Set the physical protection on the * specified range of this map as requested. 
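 * Write access is revoked by setting, not clearing, a bit: in the AP
 * encoding implied by pmap_page_dirty() above, read-only is the pattern
 * with ATTR_AP(ATTR_AP_RO) set, so an atomic OR suffices (editor's
 * sketch below, not part of the original change; the encoding claim is
 * an assumption drawn from that helper, not stated in this hunk).
 */

#if 0
	/*
	 * Editor's sketch: the demotion performed in the loop below.
	 * pmap_set() is an atomic OR, so a concurrent hardware walk never
	 * observes a widened permission.
	 */
	pmap_set(l3p, ATTR_AP(ATTR_AP_RO));	/* RW -> RO, never RO -> RW */
#endif

/*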
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va, va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t *l3p, l3; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE) return; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) { va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (l2 == NULL || (pmap_load(l2) & ATTR_DESCR_MASK) != L2_TABLE) continue; if (va_next > eva) va_next = eva; va = va_next; for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, sva += L3_SIZE) { l3 = pmap_load(l3p); if (pmap_l3_valid(l3)) { pmap_set(l3p, ATTR_AP(ATTR_AP_RO)); PTE_SYNC(l3p); /* XXX: Use pmap_invalidate_range */ pmap_invalidate_page(pmap, va); } } } PMAP_UNLOCK(pmap); /* TODO: Only invalidate entries we are touching */ pmap_invalidate_all(pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind __unused) { struct rwlock *lock; pd_entry_t *pde; pt_entry_t new_l3, orig_l3; pt_entry_t *l3; pv_entry_t pv; vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa; vm_page_t mpte, om, l1_m, l2_m, l3_m; boolean_t nosleep; int lvl; va = trunc_page(va); if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); pa = VM_PAGE_TO_PHYS(m); new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) | L3_PAGE); if ((prot & VM_PROT_WRITE) == 0) new_l3 |= ATTR_AP(ATTR_AP_RO); if ((flags & PMAP_ENTER_WIRED) != 0) new_l3 |= ATTR_SW_WIRED; if ((va >> 63) == 0) new_l3 |= ATTR_AP(ATTR_AP_USER); CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa); mpte = NULL; lock = NULL; rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); if (va < VM_MAXUSER_ADDRESS) { nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; mpte = pmap_alloc_l3(pmap, va, nosleep ? 
NULL : &lock); if (mpte == NULL && nosleep) { CTR0(KTR_PMAP, "pmap_enter: mpte == NULL"); if (lock != NULL) rw_wunlock(lock); rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); return (KERN_RESOURCE_SHORTAGE); } pde = pmap_pde(pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_enter: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_enter: Invalid level %d", lvl)); l3 = pmap_l2_to_l3(pde, va); } else { pde = pmap_pde(pmap, va, &lvl); /* * If we get a level 2 pde it must point to a level 3 entry * otherwise we will need to create the intermediate tables */ if (lvl < 2) { switch(lvl) { default: case -1: /* Get the l0 pde to update */ pde = pmap_l0(pmap, va); KASSERT(pde != NULL, ("...")); l1_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (l1_m == NULL) panic("pmap_enter: l1 pte_m == NULL"); if ((l1_m->flags & PG_ZERO) == 0) pmap_zero_page(l1_m); l1_pa = VM_PAGE_TO_PHYS(l1_m); pmap_load_store(pde, l1_pa | L0_TABLE); PTE_SYNC(pde); /* FALLTHROUGH */ case 0: /* Get the l1 pde to update */ pde = pmap_l1_to_l2(pde, va); KASSERT(pde != NULL, ("...")); l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (l2_m == NULL) panic("pmap_enter: l2 pte_m == NULL"); if ((l2_m->flags & PG_ZERO) == 0) pmap_zero_page(l2_m); l2_pa = VM_PAGE_TO_PHYS(l2_m); pmap_load_store(pde, l2_pa | L1_TABLE); PTE_SYNC(pde); /* FALLTHROUGH */ case 1: /* Get the l2 pde to update */ pde = pmap_l1_to_l2(pde, va); l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (l3_m == NULL) panic("pmap_enter: l3 pte_m == NULL"); if ((l3_m->flags & PG_ZERO) == 0) pmap_zero_page(l3_m); l3_pa = VM_PAGE_TO_PHYS(l3_m); pmap_load_store(pde, l3_pa | L2_TABLE); PTE_SYNC(pde); break; } } l3 = pmap_l2_to_l3(pde, va); pmap_invalidate_page(pmap, va); } om = NULL; orig_l3 = pmap_load(l3); opa = orig_l3 & ~ATTR_MASK; /* * Is the specified virtual address already mapped? */ if (pmap_l3_valid(orig_l3)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if ((flags & PMAP_ENTER_WIRED) != 0 && (orig_l3 & ATTR_SW_WIRED) == 0) pmap->pm_stats.wired_count++; else if ((flags & PMAP_ENTER_WIRED) == 0 && (orig_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count--; /* * Remove the extra PT page reference. */ if (mpte != NULL) { mpte->wire_count--; KASSERT(mpte->wire_count > 0, ("pmap_enter: missing reference to page table page," " va: 0x%lx", va)); } /* * Has the physical page changed? */ if (opa == pa) { /* * No, might be a protection or wiring change. */ if ((orig_l3 & ATTR_SW_MANAGED) != 0) { new_l3 |= ATTR_SW_MANAGED; if ((new_l3 & ATTR_AP(ATTR_AP_RW)) == ATTR_AP(ATTR_AP_RW)) { vm_page_aflag_set(m, PGA_WRITEABLE); } } goto validate; } /* Flush the cache, there might be uncommitted data in it */ if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3)) cpu_dcache_wb_range(va, L3_SIZE); } else { /* * Increment the counters. */ if ((new_l3 & ATTR_SW_WIRED) != 0) pmap->pm_stats.wired_count++; pmap_resident_count_inc(pmap, 1); } /* * Enter on the PV list if part of our managed memory. 
*/ if ((m->oflags & VPO_UNMANAGED) == 0) { new_l3 |= ATTR_SW_MANAGED; pv = get_pv_entry(pmap, &lock); pv->pv_va = va; CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) vm_page_aflag_set(m, PGA_WRITEABLE); } /* * Update the L3 entry. */ if (orig_l3 != 0) { validate: orig_l3 = pmap_load_store(l3, new_l3); PTE_SYNC(l3); opa = orig_l3 & ~ATTR_MASK; if (opa != pa) { if ((orig_l3 & ATTR_SW_MANAGED) != 0) { om = PHYS_TO_VM_PAGE(opa); if (pmap_page_dirty(orig_l3)) vm_page_dirty(om); if ((orig_l3 & ATTR_AF) != 0) vm_page_aflag_set(om, PGA_REFERENCED); CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); pmap_pvh_free(&om->md, pmap, va); } } else if (pmap_page_dirty(orig_l3)) { if ((orig_l3 & ATTR_SW_MANAGED) != 0) vm_page_dirty(m); } } else { pmap_load_store(l3, new_l3); PTE_SYNC(l3); } pmap_invalidate_page(pmap, va); if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap)) cpu_icache_sync_range(va, PAGE_SIZE); if (lock != NULL) rw_wunlock(lock); rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); return (KERN_SUCCESS); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { struct rwlock *lock; vm_offset_t va; vm_page_t m, mpte; vm_pindex_t diff, psize; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); mpte = NULL; m = m_start; lock = NULL; rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { va = start + ptoa(diff); mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock); m = TAILQ_NEXT(m, listq); } if (lock != NULL) rw_wunlock(lock); rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { struct rwlock *lock; lock = NULL; rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); if (lock != NULL) rw_wunlock(lock); rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) { struct spglist free; pd_entry_t *pde; pt_entry_t *l3; vm_paddr_t pa; int lvl; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); rw_assert(&pvh_global_lock, RA_LOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va); /* * In the case that a page table page is not * resident, we are creating it here. 
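 * Unlike pmap_enter(), this path must not sleep; the sketch below shows
 * the convention it relies on (editor's illustration, not part of the
 * original change).
 */

#if 0
	/*
	 * Editor's sketch: passing a NULL lock pointer tells
	 * _pmap_alloc_l3() that it may not drop locks and sleep, so a
	 * failed page table page allocation simply abandons this
	 * opportunistic mapping.
	 */
	mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
	if (mpte == NULL)
		return (NULL);		/* skip the mapping, no retry */
#endif

/*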
*/ if (va < VM_MAXUSER_ADDRESS) { vm_pindex_t l2pindex; /* * Calculate pagetable page index */ l2pindex = pmap_l2_pindex(va); if (mpte && (mpte->pindex == l2pindex)) { mpte->wire_count++; } else { /* * Get the l2 entry */ pde = pmap_pde(pmap, va, &lvl); /* * If the page table page is mapped, we just increment * the hold count, and activate it. Otherwise, we * attempt to allocate a page table page. If this * attempt fails, we don't retry. Instead, we give up. */ if (lvl == 2 && pmap_load(pde) != 0) { mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK); mpte->wire_count++; } else { /* * Pass NULL instead of the PV list lock * pointer, because we don't intend to sleep. */ mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); if (mpte == NULL) return (mpte); } } l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); l3 = &l3[pmap_l3_index(va)]; } else { mpte = NULL; pde = pmap_pde(kernel_pmap, va, &lvl); KASSERT(pde != NULL, ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx", va)); KASSERT(lvl == 2, ("pmap_enter_quick_locked: Invalid level %d", lvl)); l3 = pmap_l2_to_l3(pde, va); } if (pmap_load(l3) != 0) { if (mpte != NULL) { mpte->wire_count--; mpte = NULL; } return (mpte); } /* * Enter on the PV list if part of our managed memory. */ if ((m->oflags & VPO_UNMANAGED) == 0 && !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { if (mpte != NULL) { SLIST_INIT(&free); if (pmap_unwire_l3(pmap, va, mpte, &free)) { pmap_invalidate_page(pmap, va); pmap_free_zero_pages(&free); } mpte = NULL; } return (mpte); } /* * Increment counters */ pmap_resident_count_inc(pmap, 1); pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RW) | L3_PAGE; /* * Now validate mapping with RO protection */ if ((m->oflags & VPO_UNMANAGED) == 0) pa |= ATTR_SW_MANAGED; pmap_load_store(l3, pa); PTE_SYNC(l3); pmap_invalidate_page(pmap, va); return (mpte); } /* * This code maps large physical mmap regions into the * processor address space. Note that some shortcuts * are taken, but the code works. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("pmap_object_init_pt: non-device object")); } /* * Clear the wired attribute from the mappings for the specified range of * addresses in the given pmap. Every valid mapping within that range * must have the wired attribute set. In contrast, invalid mappings * cannot have the wired attribute set, so they are ignored. * * The wired attribute of the page table entry is not a hardware feature, * so there is no need to invalidate any TLB entries. 
*/ void pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va_next; pd_entry_t *l0, *l1, *l2; pt_entry_t *l3; boolean_t pv_lists_locked; pv_lists_locked = FALSE; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { l0 = pmap_l0(pmap, sva); if (pmap_load(l0) == 0) { va_next = (sva + L0_SIZE) & ~L0_OFFSET; if (va_next < sva) va_next = eva; continue; } l1 = pmap_l0_to_l1(l0, sva); if (pmap_load(l1) == 0) { va_next = (sva + L1_SIZE) & ~L1_OFFSET; if (va_next < sva) va_next = eva; continue; } va_next = (sva + L2_SIZE) & ~L2_OFFSET; if (va_next < sva) va_next = eva; l2 = pmap_l1_to_l2(l1, sva); if (pmap_load(l2) == 0) continue; if (va_next > eva) va_next = eva; for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, sva += L3_SIZE) { if (pmap_load(l3) == 0) continue; if ((pmap_load(l3) & ATTR_SW_WIRED) == 0) panic("pmap_unwire: l3 %#jx is missing " "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3)); /* * PG_W must be cleared atomically. Although the pmap * lock synchronizes access to PG_W, another processor * could be setting PG_M and/or PG_A concurrently. */ atomic_clear_long(l3, ATTR_SW_WIRED); pmap->pm_stats.wired_count--; } } if (pv_lists_locked) rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); pagezero((void *)va); } /* * pmap_zero_page_area zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. * * off and size may not cover an area beyond a single hardware page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); if (off == 0 && size == PAGE_SIZE) pagezero((void *)va); else bzero((char *)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. This * is intended to be called from the vm_pagezero process only and * outside of Giant. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); pagezero((void *)va); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. 
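 * On arm64 the "mapping" step is pure address arithmetic: every physical
 * page is permanently visible through the direct map, as the sketch
 * below illustrates (editor's addition, not part of the original change;
 * the helper name is hypothetical).
 */

#if 0
/* Editor's sketch: no transient pte is needed to reach a page. */
static void *
sketch_page_va(vm_page_t m)
{
	return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
#endif

/*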
*/ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); pagecopy((void *)src, (void *)dst); } int unmapped_buf_allowed = 1; void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], vm_offset_t b_offset, int xfersize) { void *a_cp, *b_cp; vm_page_t m_a, m_b; vm_paddr_t p_a, p_b; vm_offset_t a_pg_offset, b_pg_offset; int cnt; while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; m_a = ma[a_offset >> PAGE_SHIFT]; p_a = m_a->phys_addr; b_pg_offset = b_offset & PAGE_MASK; m_b = mb[b_offset >> PAGE_SHIFT]; p_b = m_b->phys_addr; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); cnt = min(cnt, PAGE_SIZE - b_pg_offset); if (__predict_false(!PHYS_IN_DMAP(p_a))) { panic("!DMAP a %lx", p_a); } else { a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset; } if (__predict_false(!PHYS_IN_DMAP(p_b))) { panic("!DMAP b %lx", p_b); } else { b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset; } bcopy(a_cp, b_cp, cnt); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } } vm_offset_t pmap_quick_enter_page(vm_page_t m) { return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); } void pmap_quick_remove_page(vm_offset_t addr) { } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct rwlock *lock; pv_entry_t pv; int loops = 0; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); rv = FALSE; rw_rlock(&pvh_global_lock); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { rv = TRUE; break; } loops++; if (loops >= 16) break; } rw_runlock(lock); rw_runlock(&pvh_global_lock); return (rv); } /* * pmap_page_wired_mappings: * * Return the number of managed mappings to the given physical page * that are wired. */ int pmap_page_wired_mappings(vm_page_t m) { struct rwlock *lock; pmap_t pmap; pt_entry_t *pte; pv_entry_t pv; int count, lvl, md_gen; if ((m->oflags & VPO_UNMANAGED) != 0) return (0); rw_rlock(&pvh_global_lock); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: count = 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_runlock(lock); PMAP_LOCK(pmap); rw_rlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto restart; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0) count++; PMAP_UNLOCK(pmap); } rw_runlock(lock); rw_runlock(&pvh_global_lock); return (count); } /* * Destroy all managed, non-wired mappings in the given user-space * pmap. This pmap cannot be active on any processor besides the * caller. * * This function cannot be applied to the kernel pmap. Moreover, it * is not intended for general use. It is only to be used during * process termination. Consequently, it can be implemented in ways * that make it faster than pmap_remove(). First, it can more quickly * destroy mappings by iterating over the pmap's collection of PV * entries, rather than searching the page table. Second, it doesn't * have to test and clear the page table entries atomically, because * no processor is currently accessing the user address space. 
In * particular, a page table entry's dirty bit won't change state once * this function starts. */ void pmap_remove_pages(pmap_t pmap) { pd_entry_t *pde; pt_entry_t *pte, tpte; struct spglist free; vm_page_t m; pv_entry_t pv; struct pv_chunk *pc, *npc; struct rwlock *lock; int64_t bit; uint64_t inuse, bitmask; int allfree, field, freed, idx, lvl; vm_paddr_t pa; lock = NULL; SLIST_INIT(&free); rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { allfree = 1; freed = 0; for (field = 0; field < _NPCM; field++) { inuse = ~pc->pc_map[field] & pc_freemask[field]; while (inuse != 0) { bit = ffsl(inuse) - 1; bitmask = 1UL << bit; idx = field * 64 + bit; pv = &pc->pc_pventry[idx]; inuse &= ~bitmask; pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("Attempting to remove an unmapped page")); KASSERT(lvl == 2, ("Invalid page directory level: %d", lvl)); pte = pmap_l2_to_l3(pde, pv->pv_va); KASSERT(pte != NULL, ("Attempting to remove an unmapped page")); tpte = pmap_load(pte); /* * We cannot remove wired pages from a process' mapping at this time */ if (tpte & ATTR_SW_WIRED) { allfree = 0; continue; } pa = tpte & ~ATTR_MASK; m = PHYS_TO_VM_PAGE(pa); KASSERT(m->phys_addr == pa, ("vm_page_t %p phys_addr mismatch %016jx %016jx", m, (uintmax_t)m->phys_addr, (uintmax_t)tpte)); KASSERT((m->flags & PG_FICTITIOUS) != 0 || m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad pte %#jx", (uintmax_t)tpte)); /* XXX: assumes tpte is level 3 */ if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(tpte)) cpu_dcache_wb_range(pv->pv_va, L3_SIZE); pmap_load_clear(pte); PTE_SYNC(pte); pmap_invalidate_page(pmap, pv->pv_va); /* * Update the vm_page_t clean/reference bits. */ if ((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) vm_page_dirty(m); CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); /* Mark free */ pc->pc_map[field] |= bitmask; pmap_resident_count_dec(pmap, 1); TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; pmap_unuse_l3(pmap, pv->pv_va, pmap_load(pde), &free); freed++; } } PV_STAT(atomic_add_long(&pv_entry_frees, freed)); PV_STAT(atomic_add_int(&pv_entry_spare, freed)); PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); if (allfree) { TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); free_pv_chunk(pc); } } pmap_invalidate_all(pmap); if (lock != NULL) rw_wunlock(lock); rw_runlock(&pvh_global_lock); PMAP_UNLOCK(pmap); pmap_free_zero_pages(&free); } /* * This is used to check if a page has been accessed or modified. As we * don't have a bit to see if it has been modified we have to assume it * has been if the page is read/write. 
 */
static boolean_t
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
{
	struct rwlock *lock;
	pv_entry_t pv;
	pt_entry_t *pte, mask, value;
	pmap_t pmap;
	int lvl, md_gen;
	boolean_t rv;

	rv = FALSE;
	rw_rlock(&pvh_global_lock);
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	rw_rlock(lock);
restart:
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			md_gen = m->md.pv_gen;
			rw_runlock(lock);
			PMAP_LOCK(pmap);
			rw_rlock(lock);
			if (md_gen != m->md.pv_gen) {
				PMAP_UNLOCK(pmap);
				goto restart;
			}
		}
		pte = pmap_pte(pmap, pv->pv_va, &lvl);
		KASSERT(lvl == 3,
		    ("pmap_page_test_mappings: Invalid level %d", lvl));
		mask = 0;
		value = 0;
		if (modified) {
			mask |= ATTR_AP_RW_BIT;
			value |= ATTR_AP(ATTR_AP_RW);
		}
		if (accessed) {
			mask |= ATTR_AF | ATTR_DESCR_MASK;
			value |= ATTR_AF | L3_PAGE;
		}
		rv = (pmap_load(pte) & mask) == value;
		PMAP_UNLOCK(pmap);
		if (rv)
			goto out;
	}
out:
	rw_runlock(lock);
	rw_runlock(&pvh_global_lock);
	return (rv);
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can map the page writable, and hence none can be
	 * considered modified.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (pmap_page_test_mappings(m, FALSE, TRUE));
}

/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *pte;
	boolean_t rv;
	int lvl;

	rv = FALSE;
	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, addr, &lvl);
	if (pte != NULL && pmap_load(pte) != 0) {
		rv = TRUE;
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return (pmap_page_test_mappings(m, TRUE, FALSE));
}
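/*
 * Illustrative sketch, not part of the FreeBSD sources: how the mask/value
 * pair built in pmap_page_test_mappings() above lets one AND test both the
 * "modified" condition (still mapped read/write, since this pmap has no
 * dirty bit) and the "accessed" condition (access flag set on a valid L3
 * page).  The SK_* constants are hypothetical stand-ins for the arm64 PTE
 * fields; builds as a standalone userspace program.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	SK_ATTR_AF		(1ULL << 10)	/* access flag */
#define	SK_ATTR_AP_RW_BIT	(1ULL << 7)	/* 0 = read/write, 1 = RO */
#define	SK_ATTR_AP_RW		0ULL
#define	SK_DESCR_MASK		3ULL
#define	SK_L3_PAGE		3ULL

static bool
sk_test_pte(uint64_t pte, bool accessed, bool modified)
{
	uint64_t mask = 0, value = 0;

	if (modified) {		/* "dirty" == still mapped read/write */
		mask |= SK_ATTR_AP_RW_BIT;
		value |= SK_ATTR_AP_RW;
	}
	if (accessed) {		/* referenced == AF set on a valid page */
		mask |= SK_ATTR_AF | SK_DESCR_MASK;
		value |= SK_ATTR_AF | SK_L3_PAGE;
	}
	return ((pte & mask) == value);
}

int
main(void)
{
	/* A valid, accessed, read/write (AP bit clear) page entry. */
	uint64_t pte = SK_L3_PAGE | SK_ATTR_AF;

	printf("%d %d\n", sk_test_pte(pte, true, false),
	    sk_test_pte(pte, false, true));	/* prints: 1 1 */
	return (0);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	pmap_t pmap;
	struct rwlock *lock;
	pv_entry_t pv;
	pt_entry_t oldpte, *pte;
	int lvl, md_gen;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.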
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_rlock(&pvh_global_lock); lock = VM_PAGE_TO_PV_LIST_LOCK(m); retry_pv_loop: rw_wlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); rw_wunlock(lock); goto retry_pv_loop; } } pte = pmap_pte(pmap, pv->pv_va, &lvl); retry: oldpte = pmap_load(pte); if ((oldpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) { if (!atomic_cmpset_long(pte, oldpte, oldpte | ATTR_AP(ATTR_AP_RO))) goto retry; if ((oldpte & ATTR_AF) != 0) vm_page_dirty(m); pmap_invalidate_page(pmap, pv->pv_va); } PMAP_UNLOCK(pmap); } rw_wunlock(lock); vm_page_aflag_clear(m, PGA_WRITEABLE); rw_runlock(&pvh_global_lock); } static __inline boolean_t safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) { return (FALSE); } #define PMAP_TS_REFERENCED_MAX 5 /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { pv_entry_t pv, pvf; pmap_t pmap; struct rwlock *lock; pd_entry_t *pde, tpde; pt_entry_t *pte, tpte; vm_paddr_t pa; int cleared, md_gen, not_cleared, lvl; struct spglist free; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_ts_referenced: page %p is not managed", m)); SLIST_INIT(&free); cleared = 0; pa = VM_PAGE_TO_PHYS(m); lock = PHYS_TO_PV_LIST_LOCK(pa); rw_rlock(&pvh_global_lock); rw_wlock(lock); retry: not_cleared = 0; if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) goto out; pv = pvf; do { if (pvf == NULL) pvf = pv; pmap = PV_PMAP(pv); if (!PMAP_TRYLOCK(pmap)) { md_gen = m->md.pv_gen; rw_wunlock(lock); PMAP_LOCK(pmap); rw_wlock(lock); if (md_gen != m->md.pv_gen) { PMAP_UNLOCK(pmap); goto retry; } } pde = pmap_pde(pmap, pv->pv_va, &lvl); KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found")); KASSERT(lvl == 2, ("pmap_ts_referenced: invalid pde level %d", lvl)); tpde = pmap_load(pde); KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE, ("pmap_ts_referenced: found an invalid l2 table")); pte = pmap_l2_to_l3(pde, pv->pv_va); tpte = pmap_load(pte); if ((tpte & ATTR_AF) != 0) { if (safe_to_clear_referenced(pmap, tpte)) { /* * TODO: We don't handle the access flag * at all. We need to be able to set it in * the exception handler. */ panic("ARM64TODO: safe_to_clear_referenced\n"); } else if ((tpte & ATTR_SW_WIRED) == 0) { /* * Wired pages cannot be paged out so * doing accessed bit emulation for * them is wasted effort. We do the * hard work for unwired pages only. */ pmap_remove_l3(pmap, pte, pv->pv_va, tpde, &free, &lock); pmap_invalidate_page(pmap, pv->pv_va); cleared++; if (pvf == pv) pvf = NULL; pv = NULL; KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), ("inconsistent pv lock %p %p for page %p", lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); } else not_cleared++; } PMAP_UNLOCK(pmap); /* Rotate the PV list if it has more than one entry. 
*/ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; } } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + not_cleared < PMAP_TS_REFERENCED_MAX); out: rw_wunlock(lock); rw_runlock(&pvh_global_lock); pmap_free_zero_pages(&free); return (cleared + not_cleared); } /* * Apply the given advice to the specified range of addresses within the * given pmap. Depending on the advice, clear the referenced and/or * modified flags in each mapping and set the mapped page's dirty field. */ void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) { } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT(!vm_page_xbusied(m), ("pmap_clear_modify: page %p is exclusive busied", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. * If the object containing the page is locked and the page is not * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; /* ARM64TODO: We lack support for tracking if a page is modified */ } void * pmap_mapbios(vm_paddr_t pa, vm_size_t size) { return ((void *)PHYS_TO_DMAP(pa)); } void pmap_unmapbios(vm_paddr_t pa, vm_size_t size) { } /* * Sets the memory attribute for the specified page. */ void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) { m->md.pv_memattr = ma; /* * ARM64TODO: Implement the below (from the amd64 pmap) * If "m" is a normal page, update its direct mapping. This update * can be relied upon to perform any cache operations that are * required for data coherence. 
 */
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    PHYS_IN_DMAP(VM_PAGE_TO_PHYS(m)))
		panic("ARM64TODO: pmap_page_set_memattr");
}

/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pd_entry_t *l1p, l1;
	pd_entry_t *l2p, l2;
	pt_entry_t *l3p, l3;
	vm_paddr_t pa;
	bool managed;
	int val;

	PMAP_LOCK(pmap);
retry:
	pa = 0;
	val = 0;
	managed = false;

	l1p = pmap_l1(pmap, addr);
	if (l1p == NULL) /* No l1 */
		goto done;

	l1 = pmap_load(l1p);
	if ((l1 & ATTR_DESCR_MASK) == L1_INVAL)
		goto done;

	if ((l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
		pa = (l1 & ~ATTR_MASK) | (addr & L1_OFFSET);
		managed = (l1 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
		val = MINCORE_SUPER | MINCORE_INCORE;
		if (pmap_page_dirty(l1))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((l1 & ATTR_AF) == ATTR_AF)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
		goto done;
	}

	l2p = pmap_l1_to_l2(l1p, addr);
	if (l2p == NULL) /* No l2 */
		goto done;

	l2 = pmap_load(l2p);
	if ((l2 & ATTR_DESCR_MASK) == L2_INVAL)
		goto done;

	if ((l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
		pa = (l2 & ~ATTR_MASK) | (addr & L2_OFFSET);
		managed = (l2 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
		val = MINCORE_SUPER | MINCORE_INCORE;
		if (pmap_page_dirty(l2))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((l2 & ATTR_AF) == ATTR_AF)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
		goto done;
	}

	l3p = pmap_l2_to_l3(l2p, addr);
	if (l3p == NULL) /* No l3 */
		goto done;

	l3 = pmap_load(l3p);
	if ((l3 & ATTR_DESCR_MASK) == L3_INVAL)
		goto done;

	if ((l3 & ATTR_DESCR_MASK) == L3_PAGE) {
		pa = (l3 & ~ATTR_MASK) | (addr & L3_OFFSET);
		managed = (l3 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
		val = MINCORE_INCORE;
		if (pmap_page_dirty(l3))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((l3 & ATTR_AF) == ATTR_AF)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}

done:
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);

	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	td->td_pcb->pcb_l0addr = vtophys(pmap->pm_l0);
	__asm __volatile("msr ttbr0_el1, %0" : : "r"(td->td_pcb->pcb_l0addr));
	pmap_invalidate_all(pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
{

	if (va >= VM_MIN_KERNEL_ADDRESS) {
		cpu_icache_sync_range(va, sz);
	} else {
		u_int len, offset;
		vm_paddr_t pa;

		/* Find the length of data in this page to flush */
		offset = va & PAGE_MASK;
		len = imin(PAGE_SIZE - offset, sz);

		while (sz != 0) {
			/* Extract the physical address & find it in the DMAP */
			pa = pmap_extract(pmap, va);
			if (pa != 0)
				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);

			/* Move to the next page */
			sz -= len;
			va += len;
			/* Set the length for the next iteration */
			len = imin(PAGE_SIZE, sz);
		}
	}
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
}
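/*
 * Illustrative sketch, not part of the FreeBSD sources: the chunking loop
 * in pmap_sync_icache() above splits an arbitrary [va, va + sz) range into
 * page-bounded pieces -- a short first chunk up to the next page boundary,
 * then whole pages, then the tail.  The sk_* names are hypothetical; builds
 * as a standalone userspace program.
 */
#include <stdint.h>
#include <stdio.h>

#define	SK_PAGE_SIZE	4096u
#define	SK_PAGE_MASK	(SK_PAGE_SIZE - 1)

static unsigned
sk_imin(unsigned a, unsigned b)
{
	return (a < b ? a : b);
}

/* Visit [va, va + sz) one page-bounded chunk at a time. */
static void
sk_for_each_chunk(uint64_t va, unsigned sz)
{
	unsigned len, offset;

	offset = va & SK_PAGE_MASK;
	len = sk_imin(SK_PAGE_SIZE - offset, sz);
	while (sz != 0) {
		printf("chunk: va 0x%jx len %u\n", (uintmax_t)va, len);
		sz -= len;
		va += len;
		len = sk_imin(SK_PAGE_SIZE, sz);
	}
}

int
main(void)
{
	/* First chunk is 0xf00 bytes, then one full page, then the tail. */
	sk_for_each_chunk(0x100100, 0x2100);
	return (0);
}

/**
 * Get the kernel virtual address of a set of physical pages. If there are
 * physical addresses not covered by the DMAP perform a transient mapping
 * that will be removed when calling pmap_unmap_io_transient.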
* * \param page The pages the caller wishes to obtain the virtual * address on the kernel memory map. * \param vaddr On return contains the kernel virtual memory address * of the pages passed in the page parameter. * \param count Number of pages passed in. * \param can_fault TRUE if the thread using the mapped pages can take * page faults, FALSE otherwise. * * \returns TRUE if the caller must call pmap_unmap_io_transient when * finished or FALSE otherwise. * */ boolean_t pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, boolean_t can_fault) { vm_paddr_t paddr; boolean_t needs_mapping; int error, i; /* * Allocate any KVA space that we need, this is done in a separate * loop to prevent calling vmem_alloc while pinned. */ needs_mapping = FALSE; for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (__predict_false(!PHYS_IN_DMAP(paddr))) { error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &vaddr[i]); KASSERT(error == 0, ("vmem_alloc failed: %d", error)); needs_mapping = TRUE; } else { vaddr[i] = PHYS_TO_DMAP(paddr); } } /* Exit early if everything is covered by the DMAP */ if (!needs_mapping) return (FALSE); if (!can_fault) sched_pin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (!PHYS_IN_DMAP(paddr)) { panic( "pmap_map_io_transient: TODO: Map out of DMAP data"); } } return (needs_mapping); } void pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, boolean_t can_fault) { vm_paddr_t paddr; int i; if (!can_fault) sched_unpin(); for (i = 0; i < count; i++) { paddr = VM_PAGE_TO_PHYS(page[i]); if (!PHYS_IN_DMAP(paddr)) { panic("ARM64TODO: pmap_unmap_io_transient: Unmap data"); } } } Index: projects/release-pkg/sys/arm64/include/vmparam.h =================================================================== --- projects/release-pkg/sys/arm64/include/vmparam.h (revision 297926) +++ projects/release-pkg/sys/arm64/include/vmparam.h (revision 297927) @@ -1,242 +1,248 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91 * from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30 * $FreeBSD$ */ #ifndef _MACHINE_VMPARAM_H_ #define _MACHINE_VMPARAM_H_ /* * Virtual memory related constants, all in bytes */ #ifndef MAXTSIZ #define MAXTSIZ (1*1024*1024*1024) /* max text size */ #endif #ifndef DFLDSIZ #define DFLDSIZ (128*1024*1024) /* initial data size limit */ #endif #ifndef MAXDSIZ #define MAXDSIZ (1*1024*1024*1024) /* max data size */ #endif #ifndef DFLSSIZ #define DFLSSIZ (128*1024*1024) /* initial stack size limit */ #endif #ifndef MAXSSIZ #define MAXSSIZ (1*1024*1024*1024) /* max stack size */ #endif #ifndef SGROWSIZ #define SGROWSIZ (128*1024) /* amount to grow stack */ #endif /* * The physical address space is sparsely populated. */ #define VM_PHYSSEG_SPARSE /* * The number of PHYSSEG entries must be one greater than the number * of phys_avail entries because the phys_avail entry that spans the * largest physical address that is accessible by ISA DMA is split * into two PHYSSEG entries. */ #define VM_PHYSSEG_MAX 64 /* * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool * from which physical pages are allocated and VM_FREEPOOL_DIRECT is * the pool from which physical pages for small UMA objects are * allocated. */ #define VM_NFREEPOOL 2 #define VM_FREEPOOL_DEFAULT 0 #define VM_FREEPOOL_DIRECT 1 /* * Create two free page lists: VM_FREELIST_DEFAULT is for physical * pages that are above the largest physical address that is * accessible by ISA DMA and VM_FREELIST_ISADMA is for physical pages * that are below that address. */ #define VM_NFREELIST 2 #define VM_FREELIST_DEFAULT 0 #define VM_FREELIST_ISADMA 1 /* * An allocation size of 16MB is supported in order to optimize the * use of the direct map by UMA. Specifically, a cache line contains * at most four TTEs, collectively mapping 16MB of physical memory. * By reducing the number of distinct 16MB "pages" that are used by UMA, * the physical memory allocator reduces the likelihood of both 4MB * page TLB misses and cache misses caused by 4MB page TLB misses. */ #define VM_NFREEORDER 12 /* * Enable superpage reservations: 1 level. */ #ifndef VM_NRESERVLEVEL #define VM_NRESERVLEVEL 1 #endif /* * Level 0 reservations consist of 512 pages. */ #ifndef VM_LEVEL_0_ORDER #define VM_LEVEL_0_ORDER 9 #endif /** * Address space layout. * * ARMv8 implements up to a 48 bit virtual address space. The address space is * split into 2 regions at each end of the 64 bit address space, with an * out of range "hole" in the middle. * - * We limit the size of the two spaces to 39 bits each. + * We use the full 48 bits for each region, however the kernel may only use + * a limited range within this space. 
 *
- * Upper region:	0xffffffffffffffff
- *			0xffffff8000000000
+ * Upper region:	0xffffffffffffffff  Top of virtual memory
 *
- * Hole:		0xffffff7fffffffff
- *			0x0000008000000000
+ *			0xfffffeffffffffff  End of DMAP
+ *			0xfffffd0000000000  Start of DMAP
 *
- * Lower region:	0x0000007fffffffff
- *			0x0000000000000000
+ *			0xffff007fffffffff  End of KVA
+ *			0xffff000000000000  Kernel base address & start of KVA
 *
+ * Hole:		0xfffeffffffffffff
+ *			0x0001000000000000
+ *
+ * Lower region:	0x0000ffffffffffff  End of user address space
+ *			0x0000000000000000  Start of user address space
+ *
 * We use the upper region for the kernel, and the lower region for userland.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64 bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

-/* 32 GiB of kernel addresses */
-#define	VM_MIN_KERNEL_ADDRESS	(0xffffff8000000000UL)
-#define	VM_MAX_KERNEL_ADDRESS	(0xffffff8800000000UL)
+/* 512 GiB of kernel addresses */
+#define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
+#define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)

-/* Direct Map for 128 GiB of PA: 0x0 - 0x1fffffffff */
-#define	DMAP_MIN_ADDRESS	(0xffffffc000000000UL)
-#define	DMAP_MAX_ADDRESS	(0xffffffdfffffffffUL)
+/* 2 TiB for the direct map region */
+#define	DMAP_MIN_ADDRESS	(0xfffffd0000000000UL)
+#define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)

#define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define	DMAP_MAX_PHYSADDR	(dmap_phys_base + (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS))

/* True if pa is in the dmap range */
#define	PHYS_IN_DMAP(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
-    (pa) <= DMAP_MAX_PHYSADDR)
+    (pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
-    (va) <= DMAP_MAX_ADDRESS)
+    (va) < DMAP_MAX_ADDRESS)

#define	PHYS_TO_DMAP(pa)						\
({									\
	KASSERT(PHYS_IN_DMAP(pa),					\
	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
	    (vm_paddr_t)(pa)));						\
	((pa) - dmap_phys_base) | DMAP_MIN_ADDRESS;			\
})

#define	DMAP_TO_PHYS(va)						\
({									\
	KASSERT(VIRT_IN_DMAP(va),					\
	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
	    (vm_offset_t)(va)));					\
	((va) & ~DMAP_MIN_ADDRESS) + dmap_phys_base;			\
})

#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x0001000000000000UL)

#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define	USRSTACK		SHAREDPAGE

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(3)
#endif

/*
 * Optional floor (in bytes) on the size of the kmem arena.
 */
#ifndef VM_KMEM_SIZE_MIN
#define	VM_KMEM_SIZE_MIN	(16 * 1024 * 1024)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif
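/*
 * Illustrative sketch, not part of the FreeBSD sources: the new
 * PHYS_TO_DMAP()/DMAP_TO_PHYS() macros above implement a linear window --
 * subtract the physical base and OR in the window's virtual base, then
 * invert with a mask and an add.  The OR/mask trick works because the
 * offset (pa - dmap_phys_base) stays within the zero low-order bits of
 * DMAP_MIN_ADDRESS; the real macros also KASSERT the range checks.  The
 * sk_* names and the physical base value are hypothetical.  Builds as a
 * standalone userspace program.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	SK_DMAP_MIN_ADDRESS	0xfffffd0000000000ULL
static uint64_t sk_dmap_phys_base = 0x80000000ULL;	/* hypothetical */

/* VA = (PA - phys_base) | DMAP_MIN_ADDRESS */
static uint64_t
sk_phys_to_dmap(uint64_t pa)
{
	return ((pa - sk_dmap_phys_base) | SK_DMAP_MIN_ADDRESS);
}

/* PA = (VA with the window bits masked off) + phys_base */
static uint64_t
sk_dmap_to_phys(uint64_t va)
{
	return ((va & ~SK_DMAP_MIN_ADDRESS) + sk_dmap_phys_base);
}

int
main(void)
{
	uint64_t pa = 0x81234000ULL;
	uint64_t va = sk_phys_to_dmap(pa);

	assert(sk_dmap_to_phys(va) == pa);	/* round-trips exactly */
	printf("PA 0x%jx <-> VA 0x%jx\n", (uintmax_t)pa, (uintmax_t)va);
	return (0);
}

/*
 * Initial pagein size of beginning of executable file.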
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#define	UMA_MD_SMALL_ALLOC

#ifndef LOCORE
extern	vm_paddr_t dmap_phys_base;
extern	u_int tsb_kernel_ldd_phys;
extern	vm_offset_t vm_max_kernel_address;
extern	vm_offset_t init_pt_va;
#endif

#define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#endif /* !_MACHINE_VMPARAM_H_ */

Index: projects/release-pkg/sys/cam/scsi/scsi_all.c
===================================================================
--- projects/release-pkg/sys/cam/scsi/scsi_all.c	(revision 297926)
+++ projects/release-pkg/sys/cam/scsi/scsi_all.c	(revision 297927)
@@ -1,8814 +1,8828 @@
/*-
 * Implementation of Utility functions for all SCSI device types.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#ifdef _KERNEL
#include
#include
#include
#include
#include
#include
#include
#include
#else
#include
#include
#include
#include
#include
#endif

#include
#include
#include
#include
#include
#include
#include

#ifdef _KERNEL
#include
#include
#include
#include
#else
#include
#include
#ifndef FALSE
#define FALSE	0
#endif /* FALSE */
#ifndef TRUE
#define TRUE	1
#endif /* TRUE */
#define	ERESTART	-1	/* restart syscall */
#define	EJUSTRETURN	-2	/* don't modify regs, just return */
#endif /* !_KERNEL */

/*
 * This is the default number of milliseconds we wait for devices to settle
 * after a SCSI bus reset.
 */
#ifndef SCSI_DELAY
#define SCSI_DELAY 2000
#endif
/*
 * All devices need _some_ sort of bus settle delay, so we'll set it to
 * a minimum value of 100ms.  Note that this is pertinent only for SPI,
 * not transports like Fibre Channel or iSCSI where 'delay' is completely
 * meaningless.
 */
#ifndef SCSI_MIN_DELAY
#define SCSI_MIN_DELAY 100
#endif
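/*
 * Illustrative sketch, not part of the FreeBSD sources: the compile-time
 * guard that follows enforces "0 disables the delay; anything else must be
 * at least SCSI_MIN_DELAY, or the value was probably given in seconds".
 * The sk_set_scsi_delay() helper below is hypothetical (loosely modeled on
 * the set_scsi_delay() declaration further down in this file) and applies
 * the same rule at run time.  Builds as a standalone userspace program.
 */
#include <errno.h>
#include <stdio.h>

#define	SK_SCSI_MIN_DELAY	100	/* ms, mirrors SCSI_MIN_DELAY */

static int
sk_set_scsi_delay(int delay, int *scsi_delay_var)
{
	/* Reject suspiciously small nonzero values (likely seconds). */
	if (delay != 0 && delay < SK_SCSI_MIN_DELAY)
		return (EINVAL);
	*scsi_delay_var = delay;
	return (0);
}

int
main(void)
{
	int scsi_delay = 0;

	printf("%d\n", sk_set_scsi_delay(2, &scsi_delay));	/* EINVAL */
	printf("%d\n", sk_set_scsi_delay(2000, &scsi_delay));	/* 0 */
	return (0);
}

/*
 * Make sure the user isn't using seconds instead of milliseconds.
 */
#if (SCSI_DELAY < SCSI_MIN_DELAY && SCSI_DELAY != 0)
#error "SCSI_DELAY is in milliseconds, not seconds!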
Please use a larger value" #endif int scsi_delay; static int ascentrycomp(const void *key, const void *member); static int senseentrycomp(const void *key, const void *member); static void fetchtableentries(int sense_key, int asc, int ascq, struct scsi_inquiry_data *, const struct sense_key_table_entry **, const struct asc_table_entry **); #ifdef _KERNEL static void init_scsi_delay(void); static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS); static int set_scsi_delay(int delay); #endif #if !defined(SCSI_NO_OP_STRINGS) #define D (1 << T_DIRECT) #define T (1 << T_SEQUENTIAL) #define L (1 << T_PRINTER) #define P (1 << T_PROCESSOR) #define W (1 << T_WORM) #define R (1 << T_CDROM) #define O (1 << T_OPTICAL) #define M (1 << T_CHANGER) #define A (1 << T_STORARRAY) #define E (1 << T_ENCLOSURE) #define B (1 << T_RBC) #define K (1 << T_OCRW) #define V (1 << T_ADC) #define F (1 << T_OSD) #define S (1 << T_SCANNER) #define C (1 << T_COMM) #define ALL (D | T | L | P | W | R | O | M | A | E | B | K | V | F | S | C) static struct op_table_entry plextor_cd_ops[] = { { 0xD8, R, "CD-DA READ" } }; static struct scsi_op_quirk_entry scsi_op_quirk_table[] = { { /* * I believe that 0xD8 is the Plextor proprietary command * to read CD-DA data. I'm not sure which Plextor CDROM * models support the command, though. I know for sure * that the 4X, 8X, and 12X models do, and presumably the * 12-20X does. I don't know about any earlier models, * though. If anyone has any more complete information, * feel free to change this quirk entry. */ {T_CDROM, SIP_MEDIA_REMOVABLE, "PLEXTOR", "CD-ROM PX*", "*"}, sizeof(plextor_cd_ops)/sizeof(struct op_table_entry), plextor_cd_ops } }; static struct op_table_entry scsi_op_codes[] = { /* * From: http://www.t10.org/lists/op-num.txt * Modifications by Kenneth Merry (ken@FreeBSD.ORG) * and Jung-uk Kim (jkim@FreeBSD.org) * * Note: order is important in this table, scsi_op_desc() currently * depends on the opcodes in the table being in order to save * search time. * Note: scanner and comm. devices are carried over from the previous * version because they were removed in the latest spec. */ /* File: OP-NUM.TXT * * SCSI Operation Codes * Numeric Sorted Listing * as of 5/26/15 * * D - DIRECT ACCESS DEVICE (SBC-2) device column key * .T - SEQUENTIAL ACCESS DEVICE (SSC-2) ----------------- * . L - PRINTER DEVICE (SSC) M = Mandatory * . P - PROCESSOR DEVICE (SPC) O = Optional * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) V = Vendor spec. * . . R - CD/DVE DEVICE (MMC-3) Z = Obsolete * . . O - OPTICAL MEMORY DEVICE (SBC-2) * . . .M - MEDIA CHANGER DEVICE (SMC-2) * . . . A - STORAGE ARRAY DEVICE (SCC-2) * . . . .E - ENCLOSURE SERVICES DEVICE (SES) * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC) * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW) * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC) * . . . . 
.F - OBJECT-BASED STORAGE (OSD) * OP DTLPWROMAEBKVF Description * -- -------------- ---------------------------------------------- */ /* 00 MMMMMMMMMMMMMM TEST UNIT READY */ { 0x00, ALL, "TEST UNIT READY" }, /* 01 M REWIND */ { 0x01, T, "REWIND" }, /* 01 Z V ZZZZ REZERO UNIT */ { 0x01, D | W | R | O | M, "REZERO UNIT" }, /* 02 VVVVVV V */ /* 03 MMMMMMMMMMOMMM REQUEST SENSE */ { 0x03, ALL, "REQUEST SENSE" }, /* 04 M OO FORMAT UNIT */ { 0x04, D | R | O, "FORMAT UNIT" }, /* 04 O FORMAT MEDIUM */ { 0x04, T, "FORMAT MEDIUM" }, /* 04 O FORMAT */ { 0x04, L, "FORMAT" }, /* 05 VMVVVV V READ BLOCK LIMITS */ { 0x05, T, "READ BLOCK LIMITS" }, /* 06 VVVVVV V */ /* 07 OVV O OV REASSIGN BLOCKS */ { 0x07, D | W | O, "REASSIGN BLOCKS" }, /* 07 O INITIALIZE ELEMENT STATUS */ { 0x07, M, "INITIALIZE ELEMENT STATUS" }, /* 08 MOV O OV READ(6) */ { 0x08, D | T | W | O, "READ(6)" }, /* 08 O RECEIVE */ { 0x08, P, "RECEIVE" }, /* 08 GET MESSAGE(6) */ { 0x08, C, "GET MESSAGE(6)" }, /* 09 VVVVVV V */ /* 0A OO O OV WRITE(6) */ { 0x0A, D | T | W | O, "WRITE(6)" }, /* 0A M SEND(6) */ { 0x0A, P, "SEND(6)" }, /* 0A SEND MESSAGE(6) */ { 0x0A, C, "SEND MESSAGE(6)" }, /* 0A M PRINT */ { 0x0A, L, "PRINT" }, /* 0B Z ZOZV SEEK(6) */ { 0x0B, D | W | R | O, "SEEK(6)" }, /* 0B O SET CAPACITY */ { 0x0B, T, "SET CAPACITY" }, /* 0B O SLEW AND PRINT */ { 0x0B, L, "SLEW AND PRINT" }, /* 0C VVVVVV V */ /* 0D VVVVVV V */ /* 0E VVVVVV V */ /* 0F VOVVVV V READ REVERSE(6) */ { 0x0F, T, "READ REVERSE(6)" }, /* 10 VM VVV WRITE FILEMARKS(6) */ { 0x10, T, "WRITE FILEMARKS(6)" }, /* 10 O SYNCHRONIZE BUFFER */ { 0x10, L, "SYNCHRONIZE BUFFER" }, /* 11 VMVVVV SPACE(6) */ { 0x11, T, "SPACE(6)" }, /* 12 MMMMMMMMMMMMMM INQUIRY */ { 0x12, ALL, "INQUIRY" }, /* 13 V VVVV */ /* 13 O VERIFY(6) */ { 0x13, T, "VERIFY(6)" }, /* 14 VOOVVV RECOVER BUFFERED DATA */ { 0x14, T | L, "RECOVER BUFFERED DATA" }, /* 15 OMO O OOOO OO MODE SELECT(6) */ { 0x15, ALL & ~(P | R | B | F), "MODE SELECT(6)" }, /* 16 ZZMZO OOOZ O RESERVE(6) */ { 0x16, ALL & ~(R | B | V | F | C), "RESERVE(6)" }, /* 16 Z RESERVE ELEMENT(6) */ { 0x16, M, "RESERVE ELEMENT(6)" }, /* 17 ZZMZO OOOZ O RELEASE(6) */ { 0x17, ALL & ~(R | B | V | F | C), "RELEASE(6)" }, /* 17 Z RELEASE ELEMENT(6) */ { 0x17, M, "RELEASE ELEMENT(6)" }, /* 18 ZZZZOZO Z COPY */ { 0x18, D | T | L | P | W | R | O | K | S, "COPY" }, /* 19 VMVVVV ERASE(6) */ { 0x19, T, "ERASE(6)" }, /* 1A OMO O OOOO OO MODE SENSE(6) */ { 0x1A, ALL & ~(P | R | B | F), "MODE SENSE(6)" }, /* 1B O OOO O MO O START STOP UNIT */ { 0x1B, D | W | R | O | A | B | K | F, "START STOP UNIT" }, /* 1B O M LOAD UNLOAD */ { 0x1B, T | V, "LOAD UNLOAD" }, /* 1B SCAN */ { 0x1B, S, "SCAN" }, /* 1B O STOP PRINT */ { 0x1B, L, "STOP PRINT" }, /* 1B O OPEN/CLOSE IMPORT/EXPORT ELEMENT */ { 0x1B, M, "OPEN/CLOSE IMPORT/EXPORT ELEMENT" }, /* 1C OOOOO OOOM OOO RECEIVE DIAGNOSTIC RESULTS */ { 0x1C, ALL & ~(R | B), "RECEIVE DIAGNOSTIC RESULTS" }, /* 1D MMMMM MMOM MMM SEND DIAGNOSTIC */ { 0x1D, ALL & ~(R | B), "SEND DIAGNOSTIC" }, /* 1E OO OOOO O O PREVENT ALLOW MEDIUM REMOVAL */ { 0x1E, D | T | W | R | O | M | K | F, "PREVENT ALLOW MEDIUM REMOVAL" }, /* 1F */ /* 20 V VVV V */ /* 21 V VVV V */ /* 22 V VVV V */ /* 23 V V V V */ /* 23 O READ FORMAT CAPACITIES */ { 0x23, R, "READ FORMAT CAPACITIES" }, /* 24 V VV SET WINDOW */ { 0x24, S, "SET WINDOW" }, /* 25 M M M M READ CAPACITY(10) */ { 0x25, D | W | O | B, "READ CAPACITY(10)" }, /* 25 O READ CAPACITY */ { 0x25, R, "READ CAPACITY" }, /* 25 M READ CARD CAPACITY */ { 0x25, K, "READ CARD CAPACITY" }, /* 25 GET WINDOW */ { 0x25, S, 
"GET WINDOW" }, /* 26 V VV */ /* 27 V VV */ /* 28 M MOM MM READ(10) */ { 0x28, D | W | R | O | B | K | S, "READ(10)" }, /* 28 GET MESSAGE(10) */ { 0x28, C, "GET MESSAGE(10)" }, /* 29 V VVO READ GENERATION */ { 0x29, O, "READ GENERATION" }, /* 2A O MOM MO WRITE(10) */ { 0x2A, D | W | R | O | B | K, "WRITE(10)" }, /* 2A SEND(10) */ { 0x2A, S, "SEND(10)" }, /* 2A SEND MESSAGE(10) */ { 0x2A, C, "SEND MESSAGE(10)" }, /* 2B Z OOO O SEEK(10) */ { 0x2B, D | W | R | O | K, "SEEK(10)" }, /* 2B O LOCATE(10) */ { 0x2B, T, "LOCATE(10)" }, /* 2B O POSITION TO ELEMENT */ { 0x2B, M, "POSITION TO ELEMENT" }, /* 2C V OO ERASE(10) */ { 0x2C, R | O, "ERASE(10)" }, /* 2D O READ UPDATED BLOCK */ { 0x2D, O, "READ UPDATED BLOCK" }, /* 2D V */ /* 2E O OOO MO WRITE AND VERIFY(10) */ { 0x2E, D | W | R | O | B | K, "WRITE AND VERIFY(10)" }, /* 2F O OOO VERIFY(10) */ { 0x2F, D | W | R | O, "VERIFY(10)" }, /* 30 Z ZZZ SEARCH DATA HIGH(10) */ { 0x30, D | W | R | O, "SEARCH DATA HIGH(10)" }, /* 31 Z ZZZ SEARCH DATA EQUAL(10) */ { 0x31, D | W | R | O, "SEARCH DATA EQUAL(10)" }, /* 31 OBJECT POSITION */ { 0x31, S, "OBJECT POSITION" }, /* 32 Z ZZZ SEARCH DATA LOW(10) */ { 0x32, D | W | R | O, "SEARCH DATA LOW(10)" }, /* 33 Z OZO SET LIMITS(10) */ { 0x33, D | W | R | O, "SET LIMITS(10)" }, /* 34 O O O O PRE-FETCH(10) */ { 0x34, D | W | O | K, "PRE-FETCH(10)" }, /* 34 M READ POSITION */ { 0x34, T, "READ POSITION" }, /* 34 GET DATA BUFFER STATUS */ { 0x34, S, "GET DATA BUFFER STATUS" }, /* 35 O OOO MO SYNCHRONIZE CACHE(10) */ { 0x35, D | W | R | O | B | K, "SYNCHRONIZE CACHE(10)" }, /* 36 Z O O O LOCK UNLOCK CACHE(10) */ { 0x36, D | W | O | K, "LOCK UNLOCK CACHE(10)" }, /* 37 O O READ DEFECT DATA(10) */ { 0x37, D | O, "READ DEFECT DATA(10)" }, /* 37 O INITIALIZE ELEMENT STATUS WITH RANGE */ { 0x37, M, "INITIALIZE ELEMENT STATUS WITH RANGE" }, /* 38 O O O MEDIUM SCAN */ { 0x38, W | O | K, "MEDIUM SCAN" }, /* 39 ZZZZOZO Z COMPARE */ { 0x39, D | T | L | P | W | R | O | K | S, "COMPARE" }, /* 3A ZZZZOZO Z COPY AND VERIFY */ { 0x3A, D | T | L | P | W | R | O | K | S, "COPY AND VERIFY" }, /* 3B OOOOOOOOOOMOOO WRITE BUFFER */ { 0x3B, ALL, "WRITE BUFFER" }, /* 3C OOOOOOOOOO OOO READ BUFFER */ { 0x3C, ALL & ~(B), "READ BUFFER" }, /* 3D O UPDATE BLOCK */ { 0x3D, O, "UPDATE BLOCK" }, /* 3E O O O READ LONG(10) */ { 0x3E, D | W | O, "READ LONG(10)" }, /* 3F O O O WRITE LONG(10) */ { 0x3F, D | W | O, "WRITE LONG(10)" }, /* 40 ZZZZOZOZ CHANGE DEFINITION */ { 0x40, D | T | L | P | W | R | O | M | S | C, "CHANGE DEFINITION" }, /* 41 O WRITE SAME(10) */ { 0x41, D, "WRITE SAME(10)" }, /* 42 O UNMAP */ { 0x42, D, "UNMAP" }, /* 42 O READ SUB-CHANNEL */ { 0x42, R, "READ SUB-CHANNEL" }, /* 43 O READ TOC/PMA/ATIP */ { 0x43, R, "READ TOC/PMA/ATIP" }, /* 44 M M REPORT DENSITY SUPPORT */ { 0x44, T | V, "REPORT DENSITY SUPPORT" }, /* 44 READ HEADER */ /* 45 O PLAY AUDIO(10) */ { 0x45, R, "PLAY AUDIO(10)" }, /* 46 M GET CONFIGURATION */ { 0x46, R, "GET CONFIGURATION" }, /* 47 O PLAY AUDIO MSF */ { 0x47, R, "PLAY AUDIO MSF" }, /* 48 */ /* 49 */ /* 4A M GET EVENT STATUS NOTIFICATION */ { 0x4A, R, "GET EVENT STATUS NOTIFICATION" }, /* 4B O PAUSE/RESUME */ { 0x4B, R, "PAUSE/RESUME" }, /* 4C OOOOO OOOO OOO LOG SELECT */ { 0x4C, ALL & ~(R | B), "LOG SELECT" }, /* 4D OOOOO OOOO OMO LOG SENSE */ { 0x4D, ALL & ~(R | B), "LOG SENSE" }, /* 4E O STOP PLAY/SCAN */ { 0x4E, R, "STOP PLAY/SCAN" }, /* 4F */ /* 50 O XDWRITE(10) */ { 0x50, D, "XDWRITE(10)" }, /* 51 O XPWRITE(10) */ { 0x51, D, "XPWRITE(10)" }, /* 51 O READ DISC INFORMATION */ { 0x51, R, "READ DISC 
INFORMATION" }, /* 52 O XDREAD(10) */ { 0x52, D, "XDREAD(10)" }, /* 52 O READ TRACK INFORMATION */ { 0x52, R, "READ TRACK INFORMATION" }, /* 53 O RESERVE TRACK */ { 0x53, R, "RESERVE TRACK" }, /* 54 O SEND OPC INFORMATION */ { 0x54, R, "SEND OPC INFORMATION" }, /* 55 OOO OMOOOOMOMO MODE SELECT(10) */ { 0x55, ALL & ~(P), "MODE SELECT(10)" }, /* 56 ZZMZO OOOZ RESERVE(10) */ { 0x56, ALL & ~(R | B | K | V | F | C), "RESERVE(10)" }, /* 56 Z RESERVE ELEMENT(10) */ { 0x56, M, "RESERVE ELEMENT(10)" }, /* 57 ZZMZO OOOZ RELEASE(10) */ { 0x57, ALL & ~(R | B | K | V | F | C), "RELEASE(10)" }, /* 57 Z RELEASE ELEMENT(10) */ { 0x57, M, "RELEASE ELEMENT(10)" }, /* 58 O REPAIR TRACK */ { 0x58, R, "REPAIR TRACK" }, /* 59 */ /* 5A OOO OMOOOOMOMO MODE SENSE(10) */ { 0x5A, ALL & ~(P), "MODE SENSE(10)" }, /* 5B O CLOSE TRACK/SESSION */ { 0x5B, R, "CLOSE TRACK/SESSION" }, /* 5C O READ BUFFER CAPACITY */ { 0x5C, R, "READ BUFFER CAPACITY" }, /* 5D O SEND CUE SHEET */ { 0x5D, R, "SEND CUE SHEET" }, /* 5E OOOOO OOOO M PERSISTENT RESERVE IN */ { 0x5E, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE IN" }, /* 5F OOOOO OOOO M PERSISTENT RESERVE OUT */ { 0x5F, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE OUT" }, /* 7E OO O OOOO O extended CDB */ { 0x7E, D | T | R | M | A | E | B | V, "extended CDB" }, /* 7F O M variable length CDB (more than 16 bytes) */ { 0x7F, D | F, "variable length CDB (more than 16 bytes)" }, /* 80 Z XDWRITE EXTENDED(16) */ { 0x80, D, "XDWRITE EXTENDED(16)" }, /* 80 M WRITE FILEMARKS(16) */ { 0x80, T, "WRITE FILEMARKS(16)" }, /* 81 Z REBUILD(16) */ { 0x81, D, "REBUILD(16)" }, /* 81 O READ REVERSE(16) */ { 0x81, T, "READ REVERSE(16)" }, /* 82 Z REGENERATE(16) */ { 0x82, D, "REGENERATE(16)" }, /* 83 OOOOO O OO EXTENDED COPY */ { 0x83, D | T | L | P | W | O | K | V, "EXTENDED COPY" }, /* 84 OOOOO O OO RECEIVE COPY RESULTS */ { 0x84, D | T | L | P | W | O | K | V, "RECEIVE COPY RESULTS" }, /* 85 O O O ATA COMMAND PASS THROUGH(16) */ { 0x85, D | R | B, "ATA COMMAND PASS THROUGH(16)" }, /* 86 OO OO OOOOOOO ACCESS CONTROL IN */ { 0x86, ALL & ~(L | R | F), "ACCESS CONTROL IN" }, /* 87 OO OO OOOOOOO ACCESS CONTROL OUT */ { 0x87, ALL & ~(L | R | F), "ACCESS CONTROL OUT" }, /* * XXX READ(16)/WRITE(16) were not listed for CD/DVE in op-num.txt * but we had it since r1.40. Do we really want them? 
*/ /* 88 MM O O O READ(16) */ { 0x88, D | T | W | O | B, "READ(16)" }, /* 89 O COMPARE AND WRITE*/ { 0x89, D, "COMPARE AND WRITE" }, /* 8A OM O O O WRITE(16) */ { 0x8A, D | T | W | O | B, "WRITE(16)" }, /* 8B O ORWRITE */ { 0x8B, D, "ORWRITE" }, /* 8C OO O OO O M READ ATTRIBUTE */ { 0x8C, D | T | W | O | M | B | V, "READ ATTRIBUTE" }, /* 8D OO O OO O O WRITE ATTRIBUTE */ { 0x8D, D | T | W | O | M | B | V, "WRITE ATTRIBUTE" }, /* 8E O O O O WRITE AND VERIFY(16) */ { 0x8E, D | W | O | B, "WRITE AND VERIFY(16)" }, /* 8F OO O O O VERIFY(16) */ { 0x8F, D | T | W | O | B, "VERIFY(16)" }, /* 90 O O O O PRE-FETCH(16) */ { 0x90, D | W | O | B, "PRE-FETCH(16)" }, /* 91 O O O O SYNCHRONIZE CACHE(16) */ { 0x91, D | W | O | B, "SYNCHRONIZE CACHE(16)" }, /* 91 O SPACE(16) */ { 0x91, T, "SPACE(16)" }, /* 92 Z O O LOCK UNLOCK CACHE(16) */ { 0x92, D | W | O, "LOCK UNLOCK CACHE(16)" }, /* 92 O LOCATE(16) */ { 0x92, T, "LOCATE(16)" }, /* 93 O WRITE SAME(16) */ { 0x93, D, "WRITE SAME(16)" }, /* 93 M ERASE(16) */ { 0x93, T, "ERASE(16)" }, /* 94 O ZBC OUT */ { 0x94, D, "ZBC OUT" }, /* 95 O ZBC OUT */ { 0x95, D, "ZBC OUT" }, /* 96 */ /* 97 */ /* 98 */ /* 99 */ /* 9A O WRITE STREAM(16) */ { 0x9A, D, "WRITE STREAM(16)" }, /* 9B OOOOOOOOOO OOO READ BUFFER(16) */ { 0x9B, ALL & ~(B) , "READ BUFFER(16)" }, /* 9C O WRITE ATOMIC(16) */ { 0x9C, D, "WRITE ATOMIC(16)" }, /* 9D SERVICE ACTION BIDIRECTIONAL */ { 0x9D, ALL, "SERVICE ACTION BIDIRECTIONAL" }, /* XXX KDM ALL for this? op-num.txt defines it for none.. */ /* 9E SERVICE ACTION IN(16) */ { 0x9E, ALL, "SERVICE ACTION IN(16)" }, /* XXX KDM ALL for this? op-num.txt defines it for ADC.. */ /* 9F M SERVICE ACTION OUT(16) */ { 0x9F, ALL, "SERVICE ACTION OUT(16)" }, /* A0 MMOOO OMMM OMO REPORT LUNS */ { 0xA0, ALL & ~(R | B), "REPORT LUNS" }, /* A1 O BLANK */ { 0xA1, R, "BLANK" }, /* A1 O O ATA COMMAND PASS THROUGH(12) */ { 0xA1, D | B, "ATA COMMAND PASS THROUGH(12)" }, /* A2 OO O O SECURITY PROTOCOL IN */ { 0xA2, D | T | R | V, "SECURITY PROTOCOL IN" }, /* A3 OOO O OOMOOOM MAINTENANCE (IN) */ { 0xA3, ALL & ~(P | R | F), "MAINTENANCE (IN)" }, /* A3 O SEND KEY */ { 0xA3, R, "SEND KEY" }, /* A4 OOO O OOOOOOO MAINTENANCE (OUT) */ { 0xA4, ALL & ~(P | R | F), "MAINTENANCE (OUT)" }, /* A4 O REPORT KEY */ { 0xA4, R, "REPORT KEY" }, /* A5 O O OM MOVE MEDIUM */ { 0xA5, T | W | O | M, "MOVE MEDIUM" }, /* A5 O PLAY AUDIO(12) */ { 0xA5, R, "PLAY AUDIO(12)" }, /* A6 O EXCHANGE MEDIUM */ { 0xA6, M, "EXCHANGE MEDIUM" }, /* A6 O LOAD/UNLOAD C/DVD */ { 0xA6, R, "LOAD/UNLOAD C/DVD" }, /* A7 ZZ O O MOVE MEDIUM ATTACHED */ { 0xA7, D | T | W | O, "MOVE MEDIUM ATTACHED" }, /* A7 O SET READ AHEAD */ { 0xA7, R, "SET READ AHEAD" }, /* A8 O OOO READ(12) */ { 0xA8, D | W | R | O, "READ(12)" }, /* A8 GET MESSAGE(12) */ { 0xA8, C, "GET MESSAGE(12)" }, /* A9 O SERVICE ACTION OUT(12) */ { 0xA9, V, "SERVICE ACTION OUT(12)" }, /* AA O OOO WRITE(12) */ { 0xAA, D | W | R | O, "WRITE(12)" }, /* AA SEND MESSAGE(12) */ { 0xAA, C, "SEND MESSAGE(12)" }, /* AB O O SERVICE ACTION IN(12) */ { 0xAB, R | V, "SERVICE ACTION IN(12)" }, /* AC O ERASE(12) */ { 0xAC, O, "ERASE(12)" }, /* AC O GET PERFORMANCE */ { 0xAC, R, "GET PERFORMANCE" }, /* AD O READ DVD STRUCTURE */ { 0xAD, R, "READ DVD STRUCTURE" }, /* AE O O O WRITE AND VERIFY(12) */ { 0xAE, D | W | O, "WRITE AND VERIFY(12)" }, /* AF O OZO VERIFY(12) */ { 0xAF, D | W | R | O, "VERIFY(12)" }, /* B0 ZZZ SEARCH DATA HIGH(12) */ { 0xB0, W | R | O, "SEARCH DATA HIGH(12)" }, /* B1 ZZZ SEARCH DATA EQUAL(12) */ { 0xB1, W | R | O, "SEARCH DATA EQUAL(12)" }, /* B2 ZZZ SEARCH 
DATA LOW(12) */ { 0xB2, W | R | O, "SEARCH DATA LOW(12)" }, /* B3 Z OZO SET LIMITS(12) */ { 0xB3, D | W | R | O, "SET LIMITS(12)" }, /* B4 ZZ OZO READ ELEMENT STATUS ATTACHED */ { 0xB4, D | T | W | R | O, "READ ELEMENT STATUS ATTACHED" }, /* B5 OO O O SECURITY PROTOCOL OUT */ { 0xB5, D | T | R | V, "SECURITY PROTOCOL OUT" }, /* B5 O REQUEST VOLUME ELEMENT ADDRESS */ { 0xB5, M, "REQUEST VOLUME ELEMENT ADDRESS" }, /* B6 O SEND VOLUME TAG */ { 0xB6, M, "SEND VOLUME TAG" }, /* B6 O SET STREAMING */ { 0xB6, R, "SET STREAMING" }, /* B7 O O READ DEFECT DATA(12) */ { 0xB7, D | O, "READ DEFECT DATA(12)" }, /* B8 O OZOM READ ELEMENT STATUS */ { 0xB8, T | W | R | O | M, "READ ELEMENT STATUS" }, /* B9 O READ CD MSF */ { 0xB9, R, "READ CD MSF" }, /* BA O O OOMO REDUNDANCY GROUP (IN) */ { 0xBA, D | W | O | M | A | E, "REDUNDANCY GROUP (IN)" }, /* BA O SCAN */ { 0xBA, R, "SCAN" }, /* BB O O OOOO REDUNDANCY GROUP (OUT) */ { 0xBB, D | W | O | M | A | E, "REDUNDANCY GROUP (OUT)" }, /* BB O SET CD SPEED */ { 0xBB, R, "SET CD SPEED" }, /* BC O O OOMO SPARE (IN) */ { 0xBC, D | W | O | M | A | E, "SPARE (IN)" }, /* BD O O OOOO SPARE (OUT) */ { 0xBD, D | W | O | M | A | E, "SPARE (OUT)" }, /* BD O MECHANISM STATUS */ { 0xBD, R, "MECHANISM STATUS" }, /* BE O O OOMO VOLUME SET (IN) */ { 0xBE, D | W | O | M | A | E, "VOLUME SET (IN)" }, /* BE O READ CD */ { 0xBE, R, "READ CD" }, /* BF O O OOOO VOLUME SET (OUT) */ { 0xBF, D | W | O | M | A | E, "VOLUME SET (OUT)" }, /* BF O SEND DVD STRUCTURE */ { 0xBF, R, "SEND DVD STRUCTURE" } }; const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data) { caddr_t match; int i, j; u_int32_t opmask; u_int16_t pd_type; int num_ops[2]; struct op_table_entry *table[2]; int num_tables; /* * If we've got inquiry data, use it to determine what type of * device we're dealing with here. Otherwise, assume direct * access. */ if (inq_data == NULL) { pd_type = T_DIRECT; match = NULL; } else { pd_type = SID_TYPE(inq_data); match = cam_quirkmatch((caddr_t)inq_data, (caddr_t)scsi_op_quirk_table, sizeof(scsi_op_quirk_table)/ sizeof(*scsi_op_quirk_table), sizeof(*scsi_op_quirk_table), scsi_inquiry_match); } if (match != NULL) { table[0] = ((struct scsi_op_quirk_entry *)match)->op_table; num_ops[0] = ((struct scsi_op_quirk_entry *)match)->num_ops; table[1] = scsi_op_codes; num_ops[1] = sizeof(scsi_op_codes)/sizeof(scsi_op_codes[0]); num_tables = 2; } else { /* * If this is true, we have a vendor specific opcode that * wasn't covered in the quirk table. */ if ((opcode > 0xBF) || ((opcode > 0x5F) && (opcode < 0x80))) return("Vendor Specific Command"); table[0] = scsi_op_codes; num_ops[0] = sizeof(scsi_op_codes)/sizeof(scsi_op_codes[0]); num_tables = 1; } /* RBC is 'Simplified' Direct Access Device */ if (pd_type == T_RBC) pd_type = T_DIRECT; /* Map NODEVICE to Direct Access Device to handle REPORT LUNS, etc. */ if (pd_type == T_NODEVICE) pd_type = T_DIRECT; opmask = 1 << pd_type; for (j = 0; j < num_tables; j++) { for (i = 0;i < num_ops[j] && table[j][i].opcode <= opcode; i++){ if ((table[j][i].opcode == opcode) && ((table[j][i].opmask & opmask) != 0)) return(table[j][i].desc); } } /* * If we can't find a match for the command in the table, we just * assume it's a vendor specifc command. 
*/ return("Vendor Specific Command"); } #else /* SCSI_NO_OP_STRINGS */ const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data) { return(""); } #endif #if !defined(SCSI_NO_SENSE_STRINGS) #define SST(asc, ascq, action, desc) \ asc, ascq, action, desc #else const char empty_string[] = ""; #define SST(asc, ascq, action, desc) \ asc, ascq, action, empty_string #endif const struct sense_key_table_entry sense_key_table[] = { { SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" }, { SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" }, { SSD_KEY_NOT_READY, SS_RDEF, "NOT READY" }, { SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" }, { SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" }, { SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" }, { SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" }, { SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" }, { SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" }, { SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" }, { SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" }, { SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" }, { SSD_KEY_EQUAL, SS_NOP, "EQUAL" }, { SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" }, { SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" }, { SSD_KEY_COMPLETED, SS_NOP, "COMPLETED" } }; const int sense_key_table_size = sizeof(sense_key_table)/sizeof(sense_key_table[0]); static struct asc_table_entry quantum_fireball_entries[] = { { SST(0x04, 0x0b, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, initializing cmd. required") } }; static struct asc_table_entry sony_mo_entries[] = { { SST(0x04, 0x00, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, cause not reportable") } }; static struct asc_table_entry hgst_entries[] = { { SST(0x04, 0xF0, SS_RDEF, "Vendor Unique - Logical Unit Not Ready") }, { SST(0x0A, 0x01, SS_RDEF, "Unrecovered Super Certification Log Write Error") }, { SST(0x0A, 0x02, SS_RDEF, "Unrecovered Super Certification Log Read Error") }, { SST(0x15, 0x03, SS_RDEF, "Unrecovered Sector Error") }, { SST(0x3E, 0x04, SS_RDEF, "Unrecovered Self-Test Hard-Cache Test Fail") }, { SST(0x3E, 0x05, SS_RDEF, "Unrecovered Self-Test OTF-Cache Fail") }, { SST(0x40, 0x00, SS_RDEF, "Unrecovered SAT No Buffer Overflow Error") }, { SST(0x40, 0x01, SS_RDEF, "Unrecovered SAT Buffer Overflow Error") }, { SST(0x40, 0x02, SS_RDEF, "Unrecovered SAT No Buffer Overflow With ECS Fault") }, { SST(0x40, 0x03, SS_RDEF, "Unrecovered SAT Buffer Overflow With ECS Fault") }, { SST(0x40, 0x81, SS_RDEF, "DRAM Failure") }, { SST(0x44, 0x0B, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF2, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF6, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF9, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xFA, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x5D, 0x22, SS_RDEF, "Extreme Over-Temperature Warning") }, { SST(0x5D, 0x50, SS_RDEF, "Load/Unload cycle Count Warning") }, { SST(0x81, 0x00, SS_RDEF, "Vendor Unique - Internal Logic Error") }, { SST(0x85, 0x00, SS_RDEF, "Vendor Unique - Internal Key Seed Error") }, }; static struct asc_table_entry seagate_entries[] = { { SST(0x04, 0xF0, SS_RDEF, "Logical Unit Not Ready, super certify in Progress") }, { SST(0x08, 0x86, SS_RDEF, "Write Fault Data Corruption") }, { SST(0x09, 0x0D, SS_RDEF, "Tracking Failure") }, { SST(0x09, 0x0E, SS_RDEF, "ETF Failure") }, { SST(0x0B, 0x5D, SS_RDEF, 
"Pre-SMART Warning") }, { SST(0x0B, 0x85, SS_RDEF, "5V Voltage Warning") }, { SST(0x0B, 0x8C, SS_RDEF, "12V Voltage Warning") }, { SST(0x0C, 0xFF, SS_RDEF, "Write Error - Too many error recovery revs") }, { SST(0x11, 0xFF, SS_RDEF, "Unrecovered Read Error - Too many error recovery revs") }, { SST(0x19, 0x0E, SS_RDEF, "Fewer than 1/2 defect list copies") }, { SST(0x20, 0xF3, SS_RDEF, "Illegal CDB linked to skip mask cmd") }, { SST(0x24, 0xF0, SS_RDEF, "Illegal byte in CDB, LBA not matching") }, { SST(0x24, 0xF1, SS_RDEF, "Illegal byte in CDB, LEN not matching") }, { SST(0x24, 0xF2, SS_RDEF, "Mask not matching transfer length") }, { SST(0x24, 0xF3, SS_RDEF, "Drive formatted without plist") }, { SST(0x26, 0x95, SS_RDEF, "Invalid Field Parameter - CAP File") }, { SST(0x26, 0x96, SS_RDEF, "Invalid Field Parameter - RAP File") }, { SST(0x26, 0x97, SS_RDEF, "Invalid Field Parameter - TMS Firmware Tag") }, { SST(0x26, 0x98, SS_RDEF, "Invalid Field Parameter - Check Sum") }, { SST(0x26, 0x99, SS_RDEF, "Invalid Field Parameter - Firmware Tag") }, { SST(0x29, 0x08, SS_RDEF, "Write Log Dump data") }, { SST(0x29, 0x09, SS_RDEF, "Write Log Dump data") }, { SST(0x29, 0x0A, SS_RDEF, "Reserved disk space") }, { SST(0x29, 0x0B, SS_RDEF, "SDBP") }, { SST(0x29, 0x0C, SS_RDEF, "SDBP") }, { SST(0x31, 0x91, SS_RDEF, "Format Corrupted World Wide Name (WWN) is Invalid") }, { SST(0x32, 0x03, SS_RDEF, "Defect List - Length exceeds Command Allocated Length") }, { SST(0x33, 0x00, SS_RDEF, "Flash not ready for access") }, { SST(0x3F, 0x70, SS_RDEF, "Invalid RAP block") }, { SST(0x3F, 0x71, SS_RDEF, "RAP/ETF mismatch") }, { SST(0x3F, 0x90, SS_RDEF, "Invalid CAP block") }, { SST(0x3F, 0x91, SS_RDEF, "World Wide Name (WWN) Mismatch") }, { SST(0x40, 0x01, SS_RDEF, "DRAM Parity Error") }, { SST(0x40, 0x02, SS_RDEF, "DRAM Parity Error") }, { SST(0x42, 0x0A, SS_RDEF, "Loopback Test") }, { SST(0x42, 0x0B, SS_RDEF, "Loopback Test") }, { SST(0x44, 0xF2, SS_RDEF, "Compare error during data integrity check") }, { SST(0x44, 0xF6, SS_RDEF, "Unrecoverable error during data integrity check") }, { SST(0x47, 0x80, SS_RDEF, "Fibre Channel Sequence Error") }, { SST(0x4E, 0x01, SS_RDEF, "Information Unit Too Short") }, { SST(0x80, 0x00, SS_RDEF, "General Firmware Error / Command Timeout") }, { SST(0x80, 0x01, SS_RDEF, "Command Timeout") }, { SST(0x80, 0x02, SS_RDEF, "Command Timeout") }, { SST(0x80, 0x80, SS_RDEF, "FC FIFO Error During Read Transfer") }, { SST(0x80, 0x81, SS_RDEF, "FC FIFO Error During Write Transfer") }, { SST(0x80, 0x82, SS_RDEF, "DISC FIFO Error During Read Transfer") }, { SST(0x80, 0x83, SS_RDEF, "DISC FIFO Error During Write Transfer") }, { SST(0x80, 0x84, SS_RDEF, "LBA Seeded LRC Error on Read") }, { SST(0x80, 0x85, SS_RDEF, "LBA Seeded LRC Error on Write") }, { SST(0x80, 0x86, SS_RDEF, "IOEDC Error on Read") }, { SST(0x80, 0x87, SS_RDEF, "IOEDC Error on Write") }, { SST(0x80, 0x88, SS_RDEF, "Host Parity Check Failed") }, { SST(0x80, 0x89, SS_RDEF, "IOEDC error on read detected by formatter") }, { SST(0x80, 0x8A, SS_RDEF, "Host Parity Errors / Host FIFO Initialization Failed") }, { SST(0x80, 0x8B, SS_RDEF, "Host Parity Errors") }, { SST(0x80, 0x8C, SS_RDEF, "Host Parity Errors") }, { SST(0x80, 0x8D, SS_RDEF, "Host Parity Errors") }, { SST(0x81, 0x00, SS_RDEF, "LA Check Failed") }, { SST(0x82, 0x00, SS_RDEF, "Internal client detected insufficient buffer") }, { SST(0x84, 0x00, SS_RDEF, "Scheduled Diagnostic And Repair") }, }; static struct scsi_sense_quirk_entry sense_quirk_table[] = { { /* * XXX The Quantum Fireball 
ST and SE like to return 0x04 0x0b * when they really should return 0x04 0x02. */ {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"}, /*num_sense_keys*/0, sizeof(quantum_fireball_entries)/sizeof(struct asc_table_entry), /*sense key entries*/NULL, quantum_fireball_entries }, { /* * This Sony MO drive likes to return 0x04, 0x00 when it * isn't spun up. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"}, /*num_sense_keys*/0, sizeof(sony_mo_entries)/sizeof(struct asc_table_entry), /*sense key entries*/NULL, sony_mo_entries }, { /* * HGST vendor-specific error codes */ {T_DIRECT, SIP_MEDIA_FIXED, "HGST", "*", "*"}, /*num_sense_keys*/0, sizeof(hgst_entries)/sizeof(struct asc_table_entry), /*sense key entries*/NULL, hgst_entries }, { /* * SEAGATE vendor-specific error codes */ {T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "*", "*"}, /*num_sense_keys*/0, sizeof(seagate_entries)/sizeof(struct asc_table_entry), /*sense key entries*/NULL, seagate_entries } }; const int sense_quirk_table_size = sizeof(sense_quirk_table)/sizeof(sense_quirk_table[0]); static struct asc_table_entry asc_table[] = { /* * From: http://www.t10.org/lists/asc-num.txt * Modifications by Jung-uk Kim (jkim@FreeBSD.org) */ /* * File: ASC-NUM.TXT * * SCSI ASC/ASCQ Assignments * Numeric Sorted Listing * as of 8/12/15 * * D - DIRECT ACCESS DEVICE (SBC-2) device column key * .T - SEQUENTIAL ACCESS DEVICE (SSC) ------------------- * . L - PRINTER DEVICE (SSC) blank = reserved * . P - PROCESSOR DEVICE (SPC) not blank = allowed * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) * . . R - CD DEVICE (MMC) * . . O - OPTICAL MEMORY DEVICE (SBC-2) * . . .M - MEDIA CHANGER DEVICE (SMC) * . . . A - STORAGE ARRAY DEVICE (SCC) * . . . E - ENCLOSURE SERVICES DEVICE (SES) * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC) * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW) * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC) * . . . . 
.F - OBJECT-BASED STORAGE (OSD) * DTLPWROMAEBKVF * ASC ASCQ Action * Description */ /* DTLPWROMAEBKVF */ { SST(0x00, 0x00, SS_NOP, "No additional sense information") }, /* T */ { SST(0x00, 0x01, SS_RDEF, "Filemark detected") }, /* T */ { SST(0x00, 0x02, SS_RDEF, "End-of-partition/medium detected") }, /* T */ { SST(0x00, 0x03, SS_RDEF, "Setmark detected") }, /* T */ { SST(0x00, 0x04, SS_RDEF, "Beginning-of-partition/medium detected") }, /* TL */ { SST(0x00, 0x05, SS_RDEF, "End-of-data detected") }, /* DTLPWROMAEBKVF */ { SST(0x00, 0x06, SS_RDEF, "I/O process terminated") }, /* T */ { SST(0x00, 0x07, SS_RDEF, /* XXX TBD */ "Programmable early warning detected") }, /* R */ { SST(0x00, 0x11, SS_FATAL | EBUSY, "Audio play operation in progress") }, /* R */ { SST(0x00, 0x12, SS_NOP, "Audio play operation paused") }, /* R */ { SST(0x00, 0x13, SS_NOP, "Audio play operation successfully completed") }, /* R */ { SST(0x00, 0x14, SS_RDEF, "Audio play operation stopped due to error") }, /* R */ { SST(0x00, 0x15, SS_NOP, "No current audio status to return") }, /* DTLPWROMAEBKVF */ { SST(0x00, 0x16, SS_FATAL | EBUSY, "Operation in progress") }, /* DTL WROMAEBKVF */ { SST(0x00, 0x17, SS_RDEF, "Cleaning requested") }, /* T */ { SST(0x00, 0x18, SS_RDEF, /* XXX TBD */ "Erase operation in progress") }, /* T */ { SST(0x00, 0x19, SS_RDEF, /* XXX TBD */ "Locate operation in progress") }, /* T */ { SST(0x00, 0x1A, SS_RDEF, /* XXX TBD */ "Rewind operation in progress") }, /* T */ { SST(0x00, 0x1B, SS_RDEF, /* XXX TBD */ "Set capacity operation in progress") }, /* T */ { SST(0x00, 0x1C, SS_RDEF, /* XXX TBD */ "Verify operation in progress") }, /* DT B */ { SST(0x00, 0x1D, SS_RDEF, /* XXX TBD */ "ATA pass through information available") }, /* DT R MAEBKV */ { SST(0x00, 0x1E, SS_RDEF, /* XXX TBD */ "Conflicting SA creation request") }, /* DT B */ { SST(0x00, 0x1F, SS_RDEF, /* XXX TBD */ "Logical unit transitioning to another power condition") }, /* DT P B */ { SST(0x00, 0x20, SS_RDEF, /* XXX TBD */ "Extended copy information available") }, /* D */ { SST(0x00, 0x21, SS_RDEF, /* XXX TBD */ "Atomic command aborted due to ACA") }, /* D W O BK */ { SST(0x01, 0x00, SS_RDEF, "No index/sector signal") }, /* D WRO BK */ { SST(0x02, 0x00, SS_RDEF, "No seek complete") }, /* DTL W O BK */ { SST(0x03, 0x00, SS_RDEF, "Peripheral device write fault") }, /* T */ { SST(0x03, 0x01, SS_RDEF, "No write current") }, /* T */ { SST(0x03, 0x02, SS_RDEF, "Excessive write errors") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x00, SS_RDEF, "Logical unit not ready, cause not reportable") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x01, SS_WAIT | EBUSY, "Logical unit is in process of becoming ready") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x02, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, initializing command required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x03, SS_FATAL | ENXIO, "Logical unit not ready, manual intervention required") }, /* DTL RO B */ { SST(0x04, 0x04, SS_FATAL | EBUSY, "Logical unit not ready, format in progress") }, /* DT W O A BK F */ { SST(0x04, 0x05, SS_FATAL | EBUSY, "Logical unit not ready, rebuild in progress") }, /* DT W O A BK */ { SST(0x04, 0x06, SS_FATAL | EBUSY, "Logical unit not ready, recalculation in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x07, SS_FATAL | EBUSY, "Logical unit not ready, operation in progress") }, /* R */ { SST(0x04, 0x08, SS_FATAL | EBUSY, "Logical unit not ready, long write in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x09, SS_RDEF, /* XXX TBD */ "Logical unit not ready, self-test 
in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0A, SS_WAIT | ENXIO, "Logical unit not accessible, asymmetric access state transition")}, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0B, SS_FATAL | ENXIO, "Logical unit not accessible, target port in standby state") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0C, SS_FATAL | ENXIO, "Logical unit not accessible, target port in unavailable state") }, /* F */ { SST(0x04, 0x0D, SS_RDEF, /* XXX TBD */ "Logical unit not ready, structure check required") }, /* DTL WR MAEBKVF */ { SST(0x04, 0x0E, SS_RDEF, /* XXX TBD */ "Logical unit not ready, security session in progress") }, /* DT WROM B */ { SST(0x04, 0x10, SS_RDEF, /* XXX TBD */ "Logical unit not ready, auxiliary memory not accessible") }, /* DT WRO AEB VF */ { SST(0x04, 0x11, SS_WAIT | EBUSY, "Logical unit not ready, notify (enable spinup) required") }, /* M V */ { SST(0x04, 0x12, SS_RDEF, /* XXX TBD */ "Logical unit not ready, offline") }, /* DT R MAEBKV */ { SST(0x04, 0x13, SS_RDEF, /* XXX TBD */ "Logical unit not ready, SA creation in progress") }, /* D B */ { SST(0x04, 0x14, SS_RDEF, /* XXX TBD */ "Logical unit not ready, space allocation in progress") }, /* M */ { SST(0x04, 0x15, SS_RDEF, /* XXX TBD */ "Logical unit not ready, robotics disabled") }, /* M */ { SST(0x04, 0x16, SS_RDEF, /* XXX TBD */ "Logical unit not ready, configuration required") }, /* M */ { SST(0x04, 0x17, SS_RDEF, /* XXX TBD */ "Logical unit not ready, calibration required") }, /* M */ { SST(0x04, 0x18, SS_RDEF, /* XXX TBD */ "Logical unit not ready, a door is open") }, /* M */ { SST(0x04, 0x19, SS_RDEF, /* XXX TBD */ "Logical unit not ready, operating in sequential mode") }, /* DT B */ { SST(0x04, 0x1A, SS_RDEF, /* XXX TBD */ "Logical unit not ready, START/STOP UNIT command in progress") }, /* D B */ { SST(0x04, 0x1B, SS_RDEF, /* XXX TBD */ "Logical unit not ready, sanitize in progress") }, /* DT MAEB */ { SST(0x04, 0x1C, SS_RDEF, /* XXX TBD */ "Logical unit not ready, additional power use not yet granted") }, /* D */ { SST(0x04, 0x1D, SS_RDEF, /* XXX TBD */ "Logical unit not ready, configuration in progress") }, /* D */ { SST(0x04, 0x1E, SS_FATAL | ENXIO, "Logical unit not ready, microcode activation required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x1F, SS_FATAL | ENXIO, "Logical unit not ready, microcode download required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x20, SS_RDEF, /* XXX TBD */ "Logical unit not ready, logical unit reset required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x21, SS_RDEF, /* XXX TBD */ "Logical unit not ready, hard reset required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x22, SS_RDEF, /* XXX TBD */ "Logical unit not ready, power cycle required") }, /* DTL WROMAEBKVF */ { SST(0x05, 0x00, SS_RDEF, "Logical unit does not respond to selection") }, /* D WROM BK */ { SST(0x06, 0x00, SS_RDEF, "No reference position found") }, /* DTL WROM BK */ { SST(0x07, 0x00, SS_RDEF, "Multiple peripheral devices selected") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x00, SS_RDEF, "Logical unit communication failure") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x01, SS_RDEF, "Logical unit communication time-out") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x02, SS_RDEF, "Logical unit communication parity error") }, /* DT ROM BK */ { SST(0x08, 0x03, SS_RDEF, "Logical unit communication CRC error (Ultra-DMA/32)") }, /* DTLPWRO K */ { SST(0x08, 0x04, SS_RDEF, /* XXX TBD */ "Unreachable copy target") }, /* DT WRO B */ { SST(0x09, 0x00, SS_RDEF, "Track following error") }, /* WRO K */ { SST(0x09, 0x01, SS_RDEF, "Tracking servo failure") }, /* WRO K */ { 
SST(0x09, 0x02, SS_RDEF, "Focus servo failure") }, /* WRO */ { SST(0x09, 0x03, SS_RDEF, "Spindle servo failure") }, /* DT WRO B */ { SST(0x09, 0x04, SS_RDEF, "Head select fault") }, /* DT RO B */ { SST(0x09, 0x05, SS_RDEF, "Vibration induced tracking error") }, /* DTLPWROMAEBKVF */ { SST(0x0A, 0x00, SS_FATAL | ENOSPC, "Error log overflow") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x00, SS_RDEF, "Warning") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x01, SS_RDEF, "Warning - specified temperature exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x02, SS_RDEF, "Warning - enclosure degraded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x03, SS_RDEF, /* XXX TBD */ "Warning - background self-test failed") }, /* DTLPWRO AEBKVF */ { SST(0x0B, 0x04, SS_RDEF, /* XXX TBD */ "Warning - background pre-scan detected medium error") }, /* DTLPWRO AEBKVF */ { SST(0x0B, 0x05, SS_RDEF, /* XXX TBD */ "Warning - background medium scan detected medium error") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x06, SS_RDEF, /* XXX TBD */ "Warning - non-volatile cache now volatile") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x07, SS_RDEF, /* XXX TBD */ "Warning - degraded power to non-volatile cache") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x08, SS_RDEF, /* XXX TBD */ "Warning - power loss expected") }, /* D */ { SST(0x0B, 0x09, SS_RDEF, /* XXX TBD */ "Warning - device statistics notification available") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0A, SS_RDEF, /* XXX TBD */ "Warning - High critical temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0B, SS_RDEF, /* XXX TBD */ "Warning - Low critical temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0C, SS_RDEF, /* XXX TBD */ "Warning - High operating temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0D, SS_RDEF, /* XXX TBD */ "Warning - Low operating temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0E, SS_RDEF, /* XXX TBD */ "Warning - High critical humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0F, SS_RDEF, /* XXX TBD */ "Warning - Low critical humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x10, SS_RDEF, /* XXX TBD */ "Warning - High operating humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x11, SS_RDEF, /* XXX TBD */ "Warning - Low operating humidity limit exceeded") }, /* T R */ { SST(0x0C, 0x00, SS_RDEF, "Write error") }, /* K */ { SST(0x0C, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Write error - recovered with auto reallocation") }, /* D W O BK */ { SST(0x0C, 0x02, SS_RDEF, "Write error - auto reallocation failed") }, /* D W O BK */ { SST(0x0C, 0x03, SS_RDEF, "Write error - recommend reassignment") }, /* DT W O B */ { SST(0x0C, 0x04, SS_RDEF, "Compression check miscompare error") }, /* DT W O B */ { SST(0x0C, 0x05, SS_RDEF, "Data expansion occurred during compression") }, /* DT W O B */ { SST(0x0C, 0x06, SS_RDEF, "Block not compressible") }, /* R */ { SST(0x0C, 0x07, SS_RDEF, "Write error - recovery needed") }, /* R */ { SST(0x0C, 0x08, SS_RDEF, "Write error - recovery failed") }, /* R */ { SST(0x0C, 0x09, SS_RDEF, "Write error - loss of streaming") }, /* R */ { SST(0x0C, 0x0A, SS_RDEF, "Write error - padding blocks added") }, /* DT WROM B */ { SST(0x0C, 0x0B, SS_RDEF, /* XXX TBD */ "Auxiliary memory write error") }, /* DTLPWRO AEBKVF */ { SST(0x0C, 0x0C, SS_RDEF, /* XXX TBD */ "Write error - unexpected unsolicited data") }, /* DTLPWRO AEBKVF */ { SST(0x0C, 0x0D, SS_RDEF, /* XXX TBD */ "Write error - not enough unsolicited data") }, /* DT W O BK */ { SST(0x0C, 0x0E, SS_RDEF, /* XXX TBD */ "Multiple write 
errors") }, /* R */ { SST(0x0C, 0x0F, SS_RDEF, /* XXX TBD */ "Defects in error window") }, /* D */ { SST(0x0C, 0x10, SS_RDEF, /* XXX TBD */ "Incomplete multiple atomic write operations") }, /* D */ { SST(0x0C, 0x11, SS_RDEF, /* XXX TBD */ "Write error - recovery scan needed") }, /* D */ { SST(0x0C, 0x12, SS_RDEF, /* XXX TBD */ "Write error - insufficient zone resources") }, /* DTLPWRO A K */ { SST(0x0D, 0x00, SS_RDEF, /* XXX TBD */ "Error detected by third party temporary initiator") }, /* DTLPWRO A K */ { SST(0x0D, 0x01, SS_RDEF, /* XXX TBD */ "Third party device failure") }, /* DTLPWRO A K */ { SST(0x0D, 0x02, SS_RDEF, /* XXX TBD */ "Copy target device not reachable") }, /* DTLPWRO A K */ { SST(0x0D, 0x03, SS_RDEF, /* XXX TBD */ "Incorrect copy target device type") }, /* DTLPWRO A K */ { SST(0x0D, 0x04, SS_RDEF, /* XXX TBD */ "Copy target device data underrun") }, /* DTLPWRO A K */ { SST(0x0D, 0x05, SS_RDEF, /* XXX TBD */ "Copy target device data overrun") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x00, SS_RDEF, /* XXX TBD */ "Invalid information unit") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x01, SS_RDEF, /* XXX TBD */ "Information unit too short") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */ "Information unit too long") }, /* DT P R MAEBK F */ { SST(0x0E, 0x03, SS_RDEF, /* XXX TBD */ "Invalid field in command information unit") }, /* D W O BK */ { SST(0x10, 0x00, SS_RDEF, "ID CRC or ECC error") }, /* DT W O */ { SST(0x10, 0x01, SS_RDEF, /* XXX TBD */ "Logical block guard check failed") }, /* DT W O */ { SST(0x10, 0x02, SS_RDEF, /* XXX TBD */ "Logical block application tag check failed") }, /* DT W O */ { SST(0x10, 0x03, SS_RDEF, /* XXX TBD */ "Logical block reference tag check failed") }, /* T */ { SST(0x10, 0x04, SS_RDEF, /* XXX TBD */ "Logical block protection error on recovered buffer data") }, /* T */ { SST(0x10, 0x05, SS_RDEF, /* XXX TBD */ "Logical block protection method error") }, /* DT WRO BK */ { SST(0x11, 0x00, SS_FATAL|EIO, "Unrecovered read error") }, /* DT WRO BK */ { SST(0x11, 0x01, SS_FATAL|EIO, "Read retries exhausted") }, /* DT WRO BK */ { SST(0x11, 0x02, SS_FATAL|EIO, "Error too long to correct") }, /* DT W O BK */ { SST(0x11, 0x03, SS_FATAL|EIO, "Multiple read errors") }, /* D W O BK */ { SST(0x11, 0x04, SS_FATAL|EIO, "Unrecovered read error - auto reallocate failed") }, /* WRO B */ { SST(0x11, 0x05, SS_FATAL|EIO, "L-EC uncorrectable error") }, /* WRO B */ { SST(0x11, 0x06, SS_FATAL|EIO, "CIRC unrecovered error") }, /* W O B */ { SST(0x11, 0x07, SS_RDEF, "Data re-synchronization error") }, /* T */ { SST(0x11, 0x08, SS_RDEF, "Incomplete block read") }, /* T */ { SST(0x11, 0x09, SS_RDEF, "No gap found") }, /* DT O BK */ { SST(0x11, 0x0A, SS_RDEF, "Miscorrected error") }, /* D W O BK */ { SST(0x11, 0x0B, SS_FATAL|EIO, "Unrecovered read error - recommend reassignment") }, /* D W O BK */ { SST(0x11, 0x0C, SS_FATAL|EIO, "Unrecovered read error - recommend rewrite the data") }, /* DT WRO B */ { SST(0x11, 0x0D, SS_RDEF, "De-compression CRC error") }, /* DT WRO B */ { SST(0x11, 0x0E, SS_RDEF, "Cannot decompress using declared algorithm") }, /* R */ { SST(0x11, 0x0F, SS_RDEF, "Error reading UPC/EAN number") }, /* R */ { SST(0x11, 0x10, SS_RDEF, "Error reading ISRC number") }, /* R */ { SST(0x11, 0x11, SS_RDEF, "Read error - loss of streaming") }, /* DT WROM B */ { SST(0x11, 0x12, SS_RDEF, /* XXX TBD */ "Auxiliary memory read error") }, /* DTLPWRO AEBKVF */ { SST(0x11, 0x13, SS_RDEF, /* XXX TBD */ "Read error - failed retransmission request") }, /* D */ { SST(0x11, 
0x14, SS_RDEF, /* XXX TBD */ "Read error - LBA marked bad by application client") }, /* D */ { SST(0x11, 0x15, SS_RDEF, /* XXX TBD */ "Write after sanitize required") }, /* D W O BK */ { SST(0x12, 0x00, SS_RDEF, "Address mark not found for ID field") }, /* D W O BK */ { SST(0x13, 0x00, SS_RDEF, "Address mark not found for data field") }, /* DTL WRO BK */ { SST(0x14, 0x00, SS_RDEF, "Recorded entity not found") }, /* DT WRO BK */ { SST(0x14, 0x01, SS_RDEF, "Record not found") }, /* T */ { SST(0x14, 0x02, SS_RDEF, "Filemark or setmark not found") }, /* T */ { SST(0x14, 0x03, SS_RDEF, "End-of-data not found") }, /* T */ { SST(0x14, 0x04, SS_RDEF, "Block sequence error") }, /* DT W O BK */ { SST(0x14, 0x05, SS_RDEF, "Record not found - recommend reassignment") }, /* DT W O BK */ { SST(0x14, 0x06, SS_RDEF, "Record not found - data auto-reallocated") }, /* T */ { SST(0x14, 0x07, SS_RDEF, /* XXX TBD */ "Locate operation failure") }, /* DTL WROM BK */ { SST(0x15, 0x00, SS_RDEF, "Random positioning error") }, /* DTL WROM BK */ { SST(0x15, 0x01, SS_RDEF, "Mechanical positioning error") }, /* DT WRO BK */ { SST(0x15, 0x02, SS_RDEF, "Positioning error detected by read of medium") }, /* D W O BK */ { SST(0x16, 0x00, SS_RDEF, "Data synchronization mark error") }, /* D W O BK */ { SST(0x16, 0x01, SS_RDEF, "Data sync error - data rewritten") }, /* D W O BK */ { SST(0x16, 0x02, SS_RDEF, "Data sync error - recommend rewrite") }, /* D W O BK */ { SST(0x16, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Data sync error - data auto-reallocated") }, /* D W O BK */ { SST(0x16, 0x04, SS_RDEF, "Data sync error - recommend reassignment") }, /* DT WRO BK */ { SST(0x17, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with no error correction applied") }, /* DT WRO BK */ { SST(0x17, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with retries") }, /* DT WRO BK */ { SST(0x17, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with positive head offset") }, /* DT WRO BK */ { SST(0x17, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with negative head offset") }, /* WRO B */ { SST(0x17, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with retries and/or CIRC applied") }, /* D WRO BK */ { SST(0x17, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Recovered data using previous sector ID") }, /* D W O BK */ { SST(0x17, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - data auto-reallocated") }, /* D WRO BK */ { SST(0x17, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - recommend reassignment") }, /* D WRO BK */ { SST(0x17, 0x08, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - recommend rewrite") }, /* D WRO BK */ { SST(0x17, 0x09, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - data rewritten") }, /* DT WRO BK */ { SST(0x18, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with error correction applied") }, /* D WRO BK */ { SST(0x18, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with error corr. 
& retries applied") }, /* D WRO BK */ { SST(0x18, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - data auto-reallocated") }, /* R */ { SST(0x18, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with CIRC") }, /* R */ { SST(0x18, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with L-EC") }, /* D WRO BK */ { SST(0x18, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - recommend reassignment") }, /* D WRO BK */ { SST(0x18, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - recommend rewrite") }, /* D W O BK */ { SST(0x18, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with ECC - data rewritten") }, /* R */ { SST(0x18, 0x08, SS_RDEF, /* XXX TBD */ "Recovered data with linking") }, /* D O K */ { SST(0x19, 0x00, SS_RDEF, "Defect list error") }, /* D O K */ { SST(0x19, 0x01, SS_RDEF, "Defect list not available") }, /* D O K */ { SST(0x19, 0x02, SS_RDEF, "Defect list error in primary list") }, /* D O K */ { SST(0x19, 0x03, SS_RDEF, "Defect list error in grown list") }, /* DTLPWROMAEBKVF */ { SST(0x1A, 0x00, SS_RDEF, "Parameter list length error") }, /* DTLPWROMAEBKVF */ { SST(0x1B, 0x00, SS_RDEF, "Synchronous data transfer error") }, /* D O BK */ { SST(0x1C, 0x00, SS_RDEF, "Defect list not found") }, /* D O BK */ { SST(0x1C, 0x01, SS_RDEF, "Primary defect list not found") }, /* D O BK */ { SST(0x1C, 0x02, SS_RDEF, "Grown defect list not found") }, /* DT WRO BK */ { SST(0x1D, 0x00, SS_FATAL, "Miscompare during verify operation") }, /* D B */ { SST(0x1D, 0x01, SS_RDEF, /* XXX TBD */ "Miscomparable verify of unmapped LBA") }, /* D W O BK */ { SST(0x1E, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered ID with ECC correction") }, /* D O K */ { SST(0x1F, 0x00, SS_RDEF, "Partial defect list transfer") }, /* DTLPWROMAEBKVF */ { SST(0x20, 0x00, SS_FATAL | EINVAL, "Invalid command operation code") }, /* DT PWROMAEBK */ { SST(0x20, 0x01, SS_RDEF, /* XXX TBD */ "Access denied - initiator pending-enrolled") }, /* DT PWROMAEBK */ { SST(0x20, 0x02, SS_RDEF, /* XXX TBD */ "Access denied - no access rights") }, /* DT PWROMAEBK */ { SST(0x20, 0x03, SS_RDEF, /* XXX TBD */ "Access denied - invalid mgmt ID key") }, /* T */ { SST(0x20, 0x04, SS_RDEF, /* XXX TBD */ "Illegal command while in write capable state") }, /* T */ { SST(0x20, 0x05, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* T */ { SST(0x20, 0x06, SS_RDEF, /* XXX TBD */ "Illegal command while in explicit address mode") }, /* T */ { SST(0x20, 0x07, SS_RDEF, /* XXX TBD */ "Illegal command while in implicit address mode") }, /* DT PWROMAEBK */ { SST(0x20, 0x08, SS_RDEF, /* XXX TBD */ "Access denied - enrollment conflict") }, /* DT PWROMAEBK */ { SST(0x20, 0x09, SS_RDEF, /* XXX TBD */ "Access denied - invalid LU identifier") }, /* DT PWROMAEBK */ { SST(0x20, 0x0A, SS_RDEF, /* XXX TBD */ "Access denied - invalid proxy token") }, /* DT PWROMAEBK */ { SST(0x20, 0x0B, SS_RDEF, /* XXX TBD */ "Access denied - ACL LUN conflict") }, /* T */ { SST(0x20, 0x0C, SS_FATAL | EINVAL, "Illegal command when not in append-only mode") }, /* DT WRO BK */ { SST(0x21, 0x00, SS_FATAL | EINVAL, "Logical block address out of range") }, /* DT WROM BK */ { SST(0x21, 0x01, SS_FATAL | EINVAL, "Invalid element address") }, /* R */ { SST(0x21, 0x02, SS_RDEF, /* XXX TBD */ "Invalid address for write") }, /* R */ { SST(0x21, 0x03, SS_RDEF, /* XXX TBD */ "Invalid write crossing layer jump") }, /* D */ { SST(0x21, 0x04, SS_RDEF, /* XXX TBD */ "Unaligned write command") }, /* D */ { SST(0x21, 0x05, SS_RDEF, /* XXX TBD */ "Write boundary violation") }, /* D */ { SST(0x21, 0x06, SS_RDEF, /* XXX TBD */ "Attempt 
to read invalid data") }, /* D */ { SST(0x21, 0x07, SS_RDEF, /* XXX TBD */ "Read boundary violation") }, /* D */ { SST(0x22, 0x00, SS_FATAL | EINVAL, "Illegal function (use 20 00, 24 00, or 26 00)") }, /* DT P B */ { SST(0x23, 0x00, SS_FATAL | EINVAL, "Invalid token operation, cause not reportable") }, /* DT P B */ { SST(0x23, 0x01, SS_FATAL | EINVAL, "Invalid token operation, unsupported token type") }, /* DT P B */ { SST(0x23, 0x02, SS_FATAL | EINVAL, "Invalid token operation, remote token usage not supported") }, /* DT P B */ { SST(0x23, 0x03, SS_FATAL | EINVAL, "Invalid token operation, remote ROD token creation not supported") }, /* DT P B */ { SST(0x23, 0x04, SS_FATAL | EINVAL, "Invalid token operation, token unknown") }, /* DT P B */ { SST(0x23, 0x05, SS_FATAL | EINVAL, "Invalid token operation, token corrupt") }, /* DT P B */ { SST(0x23, 0x06, SS_FATAL | EINVAL, "Invalid token operation, token revoked") }, /* DT P B */ { SST(0x23, 0x07, SS_FATAL | EINVAL, "Invalid token operation, token expired") }, /* DT P B */ { SST(0x23, 0x08, SS_FATAL | EINVAL, "Invalid token operation, token cancelled") }, /* DT P B */ { SST(0x23, 0x09, SS_FATAL | EINVAL, "Invalid token operation, token deleted") }, /* DT P B */ { SST(0x23, 0x0A, SS_FATAL | EINVAL, "Invalid token operation, invalid token length") }, /* DTLPWROMAEBKVF */ { SST(0x24, 0x00, SS_FATAL | EINVAL, "Invalid field in CDB") }, /* DTLPWRO AEBKVF */ { SST(0x24, 0x01, SS_RDEF, /* XXX TBD */ "CDB decryption error") }, /* T */ { SST(0x24, 0x02, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* T */ { SST(0x24, 0x03, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* F */ { SST(0x24, 0x04, SS_RDEF, /* XXX TBD */ "Security audit value frozen") }, /* F */ { SST(0x24, 0x05, SS_RDEF, /* XXX TBD */ "Security working key frozen") }, /* F */ { SST(0x24, 0x06, SS_RDEF, /* XXX TBD */ "NONCE not unique") }, /* F */ { SST(0x24, 0x07, SS_RDEF, /* XXX TBD */ "NONCE timestamp out of range") }, /* DT R MAEBKV */ { SST(0x24, 0x08, SS_RDEF, /* XXX TBD */ "Invalid XCDB") }, /* DTLPWROMAEBKVF */ { SST(0x25, 0x00, SS_FATAL | ENXIO | SSQ_LOST, "Logical unit not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x00, SS_FATAL | EINVAL, "Invalid field in parameter list") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x01, SS_FATAL | EINVAL, "Parameter not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x02, SS_FATAL | EINVAL, "Parameter value invalid") }, /* DTLPWROMAE K */ { SST(0x26, 0x03, SS_FATAL | EINVAL, "Threshold parameters not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x04, SS_FATAL | EINVAL, "Invalid release of persistent reservation") }, /* DTLPWRO A BK */ { SST(0x26, 0x05, SS_RDEF, /* XXX TBD */ "Data decryption error") }, /* DTLPWRO K */ { SST(0x26, 0x06, SS_FATAL | EINVAL, "Too many target descriptors") }, /* DTLPWRO K */ { SST(0x26, 0x07, SS_FATAL | EINVAL, "Unsupported target descriptor type code") }, /* DTLPWRO K */ { SST(0x26, 0x08, SS_FATAL | EINVAL, "Too many segment descriptors") }, /* DTLPWRO K */ { SST(0x26, 0x09, SS_FATAL | EINVAL, "Unsupported segment descriptor type code") }, /* DTLPWRO K */ { SST(0x26, 0x0A, SS_FATAL | EINVAL, "Unexpected inexact segment") }, /* DTLPWRO K */ { SST(0x26, 0x0B, SS_FATAL | EINVAL, "Inline data length exceeded") }, /* DTLPWRO K */ { SST(0x26, 0x0C, SS_FATAL | EINVAL, "Invalid operation for copy source or destination") }, /* DTLPWRO K */ { SST(0x26, 0x0D, SS_FATAL | EINVAL, "Copy segment granularity violation") }, /* DT PWROMAEBK */ { SST(0x26, 0x0E, SS_RDEF, /* XXX TBD */ "Invalid parameter while port is enabled") }, /* F */ { 
SST(0x26, 0x0F, SS_RDEF, /* XXX TBD */ "Invalid data-out buffer integrity check value") }, /* T */ { SST(0x26, 0x10, SS_RDEF, /* XXX TBD */ "Data decryption key fail limit reached") }, /* T */ { SST(0x26, 0x11, SS_RDEF, /* XXX TBD */ "Incomplete key-associated data set") }, /* T */ { SST(0x26, 0x12, SS_RDEF, /* XXX TBD */ "Vendor specific key reference not found") }, /* D */ { SST(0x26, 0x13, SS_RDEF, /* XXX TBD */ "Application tag mode page is invalid") }, /* DT WRO BK */ { SST(0x27, 0x00, SS_FATAL | EACCES, "Write protected") }, /* DT WRO BK */ { SST(0x27, 0x01, SS_FATAL | EACCES, "Hardware write protected") }, /* DT WRO BK */ { SST(0x27, 0x02, SS_FATAL | EACCES, "Logical unit software write protected") }, /* T R */ { SST(0x27, 0x03, SS_FATAL | EACCES, "Associated write protect") }, /* T R */ { SST(0x27, 0x04, SS_FATAL | EACCES, "Persistent write protect") }, /* T R */ { SST(0x27, 0x05, SS_FATAL | EACCES, "Permanent write protect") }, /* R F */ { SST(0x27, 0x06, SS_RDEF, /* XXX TBD */ "Conditional write protect") }, /* D B */ { SST(0x27, 0x07, SS_FATAL | ENOSPC, "Space allocation failed write protect") }, /* D */ { SST(0x27, 0x08, SS_FATAL | EACCES, "Zone is read only") }, /* DTLPWROMAEBKVF */ { SST(0x28, 0x00, SS_FATAL | ENXIO, "Not ready to ready change, medium may have changed") }, /* DT WROM B */ { SST(0x28, 0x01, SS_FATAL | ENXIO, "Import or export element accessed") }, /* R */ { SST(0x28, 0x02, SS_RDEF, /* XXX TBD */ "Format-layer may have changed") }, /* M */ { SST(0x28, 0x03, SS_RDEF, /* XXX TBD */ "Import/export element accessed, medium changed") }, /* * XXX JGibbs - All of these should use the same errno, but I don't * think ENXIO is the correct choice. Should we borrow from * the networking errnos? ECONNRESET anyone? */ /* DTLPWROMAEBKVF */ { SST(0x29, 0x00, SS_FATAL | ENXIO, "Power on, reset, or bus device reset occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x01, SS_RDEF, "Power on occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x02, SS_RDEF, "SCSI bus reset occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x03, SS_RDEF, "Bus device reset function occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x04, SS_RDEF, "Device internal reset") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x05, SS_RDEF, "Transceiver mode changed to single-ended") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x06, SS_RDEF, "Transceiver mode changed to LVD") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x07, SS_RDEF, /* XXX TBD */ "I_T nexus loss occurred") }, /* DTL WROMAEBKVF */ { SST(0x2A, 0x00, SS_RDEF, "Parameters changed") }, /* DTL WROMAEBKVF */ { SST(0x2A, 0x01, SS_RDEF, "Mode parameters changed") }, /* DTL WROMAE K */ { SST(0x2A, 0x02, SS_RDEF, "Log parameters changed") }, /* DTLPWROMAE K */ { SST(0x2A, 0x03, SS_RDEF, "Reservations preempted") }, /* DTLPWROMAE */ { SST(0x2A, 0x04, SS_RDEF, /* XXX TBD */ "Reservations released") }, /* DTLPWROMAE */ { SST(0x2A, 0x05, SS_RDEF, /* XXX TBD */ "Registrations preempted") }, /* DTLPWROMAEBKVF */ { SST(0x2A, 0x06, SS_RDEF, /* XXX TBD */ "Asymmetric access state changed") }, /* DTLPWROMAEBKVF */ { SST(0x2A, 0x07, SS_RDEF, /* XXX TBD */ "Implicit asymmetric access state transition failed") }, /* DT WROMAEBKVF */ { SST(0x2A, 0x08, SS_RDEF, /* XXX TBD */ "Priority changed") }, /* D */ { SST(0x2A, 0x09, SS_RDEF, /* XXX TBD */ "Capacity data has changed") }, /* DT */ { SST(0x2A, 0x0A, SS_RDEF, /* XXX TBD */ "Error history I_T nexus cleared") }, /* DT */ { SST(0x2A, 0x0B, SS_RDEF, /* XXX TBD */ "Error history snapshot released") }, /* F */ { SST(0x2A, 0x0C, SS_RDEF, /* XXX TBD */ "Error 
recovery attributes have changed") }, /* T */ { SST(0x2A, 0x0D, SS_RDEF, /* XXX TBD */ "Data encryption capabilities changed") }, /* DT M E V */ { SST(0x2A, 0x10, SS_RDEF, /* XXX TBD */ "Timestamp changed") }, /* T */ { SST(0x2A, 0x11, SS_RDEF, /* XXX TBD */ "Data encryption parameters changed by another I_T nexus") }, /* T */ { SST(0x2A, 0x12, SS_RDEF, /* XXX TBD */ "Data encryption parameters changed by vendor specific event") }, /* T */ { SST(0x2A, 0x13, SS_RDEF, /* XXX TBD */ "Data encryption key instance counter has changed") }, /* DT R MAEBKV */ { SST(0x2A, 0x14, SS_RDEF, /* XXX TBD */ "SA creation capabilities data has changed") }, /* T M V */ { SST(0x2A, 0x15, SS_RDEF, /* XXX TBD */ "Medium removal prevention preempted") }, /* DTLPWRO K */ { SST(0x2B, 0x00, SS_RDEF, "Copy cannot execute since host cannot disconnect") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x00, SS_RDEF, "Command sequence error") }, /* */ { SST(0x2C, 0x01, SS_RDEF, "Too many windows specified") }, /* */ { SST(0x2C, 0x02, SS_RDEF, "Invalid combination of windows specified") }, /* R */ { SST(0x2C, 0x03, SS_RDEF, "Current program area is not empty") }, /* R */ { SST(0x2C, 0x04, SS_RDEF, "Current program area is empty") }, /* B */ { SST(0x2C, 0x05, SS_RDEF, /* XXX TBD */ "Illegal power condition request") }, /* R */ { SST(0x2C, 0x06, SS_RDEF, /* XXX TBD */ "Persistent prevent conflict") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x07, SS_RDEF, /* XXX TBD */ "Previous busy status") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x08, SS_RDEF, /* XXX TBD */ "Previous task set full status") }, /* DTLPWROM EBKVF */ { SST(0x2C, 0x09, SS_RDEF, /* XXX TBD */ "Previous reservation conflict status") }, /* F */ { SST(0x2C, 0x0A, SS_RDEF, /* XXX TBD */ "Partition or collection contains user objects") }, /* T */ { SST(0x2C, 0x0B, SS_RDEF, /* XXX TBD */ "Not reserved") }, /* D */ { SST(0x2C, 0x0C, SS_RDEF, /* XXX TBD */ "ORWRITE generation does not match") }, /* D */ { SST(0x2C, 0x0D, SS_RDEF, /* XXX TBD */ "Reset write pointer not allowed") }, /* D */ { SST(0x2C, 0x0E, SS_RDEF, /* XXX TBD */ "Zone is offline") }, /* D */ { SST(0x2C, 0x0F, SS_RDEF, /* XXX TBD */ "Stream not open") }, /* D */ { SST(0x2C, 0x10, SS_RDEF, /* XXX TBD */ "Unwritten data in zone") }, /* T */ { SST(0x2D, 0x00, SS_RDEF, "Overwrite error on update in place") }, /* R */ { SST(0x2E, 0x00, SS_RDEF, /* XXX TBD */ "Insufficient time for operation") }, /* D */ { SST(0x2E, 0x01, SS_RDEF, /* XXX TBD */ "Command timeout before processing") }, /* D */ { SST(0x2E, 0x02, SS_RDEF, /* XXX TBD */ "Command timeout during processing") }, /* D */ { SST(0x2E, 0x03, SS_RDEF, /* XXX TBD */ "Command timeout during processing due to error recovery") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x00, SS_RDEF, "Commands cleared by another initiator") }, /* D */ { SST(0x2F, 0x01, SS_RDEF, /* XXX TBD */ "Commands cleared by power loss notification") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x02, SS_RDEF, /* XXX TBD */ "Commands cleared by device server") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x03, SS_RDEF, /* XXX TBD */ "Some commands cleared by queuing layer event") }, /* DT WROM BK */ { SST(0x30, 0x00, SS_RDEF, "Incompatible medium installed") }, /* DT WRO BK */ { SST(0x30, 0x01, SS_RDEF, "Cannot read medium - unknown format") }, /* DT WRO BK */ { SST(0x30, 0x02, SS_RDEF, "Cannot read medium - incompatible format") }, /* DT R K */ { SST(0x30, 0x03, SS_RDEF, "Cleaning cartridge installed") }, /* DT WRO BK */ { SST(0x30, 0x04, SS_RDEF, "Cannot write medium - unknown format") }, /* DT WRO BK */ { SST(0x30, 0x05, SS_RDEF, 
"Cannot write medium - incompatible format") }, /* DT WRO B */ { SST(0x30, 0x06, SS_RDEF, "Cannot format medium - incompatible medium") }, /* DTL WROMAEBKVF */ { SST(0x30, 0x07, SS_RDEF, "Cleaning failure") }, /* R */ { SST(0x30, 0x08, SS_RDEF, "Cannot write - application code mismatch") }, /* R */ { SST(0x30, 0x09, SS_RDEF, "Current session not fixated for append") }, /* DT WRO AEBK */ { SST(0x30, 0x0A, SS_RDEF, /* XXX TBD */ "Cleaning request rejected") }, /* T */ { SST(0x30, 0x0C, SS_RDEF, /* XXX TBD */ "WORM medium - overwrite attempted") }, /* T */ { SST(0x30, 0x0D, SS_RDEF, /* XXX TBD */ "WORM medium - integrity check") }, /* R */ { SST(0x30, 0x10, SS_RDEF, /* XXX TBD */ "Medium not formatted") }, /* M */ { SST(0x30, 0x11, SS_RDEF, /* XXX TBD */ "Incompatible volume type") }, /* M */ { SST(0x30, 0x12, SS_RDEF, /* XXX TBD */ "Incompatible volume qualifier") }, /* M */ { SST(0x30, 0x13, SS_RDEF, /* XXX TBD */ "Cleaning volume expired") }, /* DT WRO BK */ { SST(0x31, 0x00, SS_RDEF, "Medium format corrupted") }, /* D L RO B */ { SST(0x31, 0x01, SS_RDEF, "Format command failed") }, /* R */ { SST(0x31, 0x02, SS_RDEF, /* XXX TBD */ "Zoned formatting failed due to spare linking") }, /* D B */ { SST(0x31, 0x03, SS_RDEF, /* XXX TBD */ "SANITIZE command failed") }, /* D W O BK */ { SST(0x32, 0x00, SS_RDEF, "No defect spare location available") }, /* D W O BK */ { SST(0x32, 0x01, SS_RDEF, "Defect list update failure") }, /* T */ { SST(0x33, 0x00, SS_RDEF, "Tape length error") }, /* DTLPWROMAEBKVF */ { SST(0x34, 0x00, SS_RDEF, "Enclosure failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x00, SS_RDEF, "Enclosure services failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x01, SS_RDEF, "Unsupported enclosure function") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x02, SS_RDEF, "Enclosure services unavailable") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x03, SS_RDEF, "Enclosure services transfer failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x04, SS_RDEF, "Enclosure services transfer refused") }, /* DTL WROMAEBKVF */ { SST(0x35, 0x05, SS_RDEF, /* XXX TBD */ "Enclosure services checksum error") }, /* L */ { SST(0x36, 0x00, SS_RDEF, "Ribbon, ink, or toner failure") }, /* DTL WROMAEBKVF */ { SST(0x37, 0x00, SS_RDEF, "Rounded parameter") }, /* B */ { SST(0x38, 0x00, SS_RDEF, /* XXX TBD */ "Event status notification") }, /* B */ { SST(0x38, 0x02, SS_RDEF, /* XXX TBD */ "ESN - power management class event") }, /* B */ { SST(0x38, 0x04, SS_RDEF, /* XXX TBD */ "ESN - media class event") }, /* B */ { SST(0x38, 0x06, SS_RDEF, /* XXX TBD */ "ESN - device busy class event") }, /* D */ { SST(0x38, 0x07, SS_RDEF, /* XXX TBD */ "Thin provisioning soft threshold reached") }, /* DTL WROMAE K */ { SST(0x39, 0x00, SS_RDEF, "Saving parameters not supported") }, /* DTL WROM BK */ { SST(0x3A, 0x00, SS_FATAL | ENXIO, "Medium not present") }, /* DT WROM BK */ { SST(0x3A, 0x01, SS_FATAL | ENXIO, "Medium not present - tray closed") }, /* DT WROM BK */ { SST(0x3A, 0x02, SS_FATAL | ENXIO, "Medium not present - tray open") }, /* DT WROM B */ { SST(0x3A, 0x03, SS_RDEF, /* XXX TBD */ "Medium not present - loadable") }, /* DT WRO B */ { SST(0x3A, 0x04, SS_RDEF, /* XXX TBD */ "Medium not present - medium auxiliary memory accessible") }, /* TL */ { SST(0x3B, 0x00, SS_RDEF, "Sequential positioning error") }, /* T */ { SST(0x3B, 0x01, SS_RDEF, "Tape position error at beginning-of-medium") }, /* T */ { SST(0x3B, 0x02, SS_RDEF, "Tape position error at end-of-medium") }, /* L */ { SST(0x3B, 0x03, SS_RDEF, "Tape or electronic vertical forms unit not ready") 
}, /* L */ { SST(0x3B, 0x04, SS_RDEF, "Slew failure") }, /* L */ { SST(0x3B, 0x05, SS_RDEF, "Paper jam") }, /* L */ { SST(0x3B, 0x06, SS_RDEF, "Failed to sense top-of-form") }, /* L */ { SST(0x3B, 0x07, SS_RDEF, "Failed to sense bottom-of-form") }, /* T */ { SST(0x3B, 0x08, SS_RDEF, "Reposition error") }, /* */ { SST(0x3B, 0x09, SS_RDEF, "Read past end of medium") }, /* */ { SST(0x3B, 0x0A, SS_RDEF, "Read past beginning of medium") }, /* */ { SST(0x3B, 0x0B, SS_RDEF, "Position past end of medium") }, /* T */ { SST(0x3B, 0x0C, SS_RDEF, "Position past beginning of medium") }, /* DT WROM BK */ { SST(0x3B, 0x0D, SS_FATAL | ENOSPC, "Medium destination element full") }, /* DT WROM BK */ { SST(0x3B, 0x0E, SS_RDEF, "Medium source element empty") }, /* R */ { SST(0x3B, 0x0F, SS_RDEF, "End of medium reached") }, /* DT WROM BK */ { SST(0x3B, 0x11, SS_RDEF, "Medium magazine not accessible") }, /* DT WROM BK */ { SST(0x3B, 0x12, SS_RDEF, "Medium magazine removed") }, /* DT WROM BK */ { SST(0x3B, 0x13, SS_RDEF, "Medium magazine inserted") }, /* DT WROM BK */ { SST(0x3B, 0x14, SS_RDEF, "Medium magazine locked") }, /* DT WROM BK */ { SST(0x3B, 0x15, SS_RDEF, "Medium magazine unlocked") }, /* R */ { SST(0x3B, 0x16, SS_RDEF, /* XXX TBD */ "Mechanical positioning or changer error") }, /* F */ { SST(0x3B, 0x17, SS_RDEF, /* XXX TBD */ "Read past end of user object") }, /* M */ { SST(0x3B, 0x18, SS_RDEF, /* XXX TBD */ "Element disabled") }, /* M */ { SST(0x3B, 0x19, SS_RDEF, /* XXX TBD */ "Element enabled") }, /* M */ { SST(0x3B, 0x1A, SS_RDEF, /* XXX TBD */ "Data transfer device removed") }, /* M */ { SST(0x3B, 0x1B, SS_RDEF, /* XXX TBD */ "Data transfer device inserted") }, /* T */ { SST(0x3B, 0x1C, SS_RDEF, /* XXX TBD */ "Too many logical objects on partition to support operation") }, /* DTLPWROMAE K */ { SST(0x3D, 0x00, SS_RDEF, "Invalid bits in IDENTIFY message") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x00, SS_RDEF, "Logical unit has not self-configured yet") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x01, SS_RDEF, "Logical unit failure") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x02, SS_RDEF, "Timeout on logical unit") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x03, SS_RDEF, /* XXX TBD */ "Logical unit failed self-test") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x04, SS_RDEF, /* XXX TBD */ "Logical unit unable to update self-test log") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x00, SS_RDEF, "Target operating conditions have changed") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x01, SS_RDEF, "Microcode has been changed") }, /* DTLPWROM BK */ { SST(0x3F, 0x02, SS_RDEF, "Changed operating definition") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x03, SS_RDEF, "INQUIRY data has changed") }, /* DT WROMAEBK */ { SST(0x3F, 0x04, SS_RDEF, "Component device attached") }, /* DT WROMAEBK */ { SST(0x3F, 0x05, SS_RDEF, "Device identifier changed") }, /* DT WROMAEB */ { SST(0x3F, 0x06, SS_RDEF, "Redundancy group created or modified") }, /* DT WROMAEB */ { SST(0x3F, 0x07, SS_RDEF, "Redundancy group deleted") }, /* DT WROMAEB */ { SST(0x3F, 0x08, SS_RDEF, "Spare created or modified") }, /* DT WROMAEB */ { SST(0x3F, 0x09, SS_RDEF, "Spare deleted") }, /* DT WROMAEBK */ { SST(0x3F, 0x0A, SS_RDEF, "Volume set created or modified") }, /* DT WROMAEBK */ { SST(0x3F, 0x0B, SS_RDEF, "Volume set deleted") }, /* DT WROMAEBK */ { SST(0x3F, 0x0C, SS_RDEF, "Volume set deassigned") }, /* DT WROMAEBK */ { SST(0x3F, 0x0D, SS_RDEF, "Volume set reassigned") }, /* DTLPWROMAE */ { SST(0x3F, 0x0E, SS_RDEF | SSQ_RESCAN , "Reported LUNs data has changed") }, /* DTLPWROMAEBKVF */ { 
SST(0x3F, 0x0F, SS_RDEF, /* XXX TBD */ "Echo buffer overwritten") }, /* DT WROM B */ { SST(0x3F, 0x10, SS_RDEF, /* XXX TBD */ "Medium loadable") }, /* DT WROM B */ { SST(0x3F, 0x11, SS_RDEF, /* XXX TBD */ "Medium auxiliary memory accessible") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x12, SS_RDEF, /* XXX TBD */ "iSCSI IP address added") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x13, SS_RDEF, /* XXX TBD */ "iSCSI IP address removed") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x14, SS_RDEF, /* XXX TBD */ "iSCSI IP address changed") }, /* DTLPWR MAEBK */ { SST(0x3F, 0x15, SS_RDEF, /* XXX TBD */ "Inspect referrals sense descriptors") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x16, SS_RDEF, /* XXX TBD */ "Microcode has been changed without reset") }, /* D */ { SST(0x3F, 0x17, SS_RDEF, /* XXX TBD */ "Zone transition to full") }, /* D */ { SST(0x40, 0x00, SS_RDEF, "RAM failure") }, /* deprecated - use 40 NN instead */ /* DTLPWROMAEBKVF */ { SST(0x40, 0x80, SS_RDEF, "Diagnostic failure: ASCQ = Component ID") }, /* DTLPWROMAEBKVF */ { SST(0x40, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x80->0xFF */ /* D */ { SST(0x41, 0x00, SS_RDEF, "Data path failure") }, /* deprecated - use 40 NN instead */ /* D */ { SST(0x42, 0x00, SS_RDEF, "Power-on or self-test failure") }, /* deprecated - use 40 NN instead */ /* DTLPWROMAEBKVF */ { SST(0x43, 0x00, SS_RDEF, "Message error") }, /* DTLPWROMAEBKVF */ { SST(0x44, 0x00, SS_RDEF, "Internal target failure") }, /* DT P MAEBKVF */ { SST(0x44, 0x01, SS_RDEF, /* XXX TBD */ "Persistent reservation information lost") }, /* DT B */ { SST(0x44, 0x71, SS_RDEF, /* XXX TBD */ "ATA device failed set features") }, /* DTLPWROMAEBKVF */ { SST(0x45, 0x00, SS_RDEF, "Select or reselect failure") }, /* DTLPWROM BK */ { SST(0x46, 0x00, SS_RDEF, "Unsuccessful soft reset") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x00, SS_RDEF, "SCSI parity error") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x01, SS_RDEF, /* XXX TBD */ "Data phase CRC error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x02, SS_RDEF, /* XXX TBD */ "SCSI parity error detected during ST data phase") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x03, SS_RDEF, /* XXX TBD */ "Information unit iuCRC error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x04, SS_RDEF, /* XXX TBD */ "Asynchronous information protection error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x05, SS_RDEF, /* XXX TBD */ "Protocol service CRC error") }, /* DT MAEBKVF */ { SST(0x47, 0x06, SS_RDEF, /* XXX TBD */ "PHY test function in progress") }, /* DT PWROMAEBK */ { SST(0x47, 0x7F, SS_RDEF, /* XXX TBD */ "Some commands cleared by iSCSI protocol event") }, /* DTLPWROMAEBKVF */ { SST(0x48, 0x00, SS_RDEF, "Initiator detected error message received") }, /* DTLPWROMAEBKVF */ { SST(0x49, 0x00, SS_RDEF, "Invalid message error") }, /* DTLPWROMAEBKVF */ { SST(0x4A, 0x00, SS_RDEF, "Command phase error") }, /* DTLPWROMAEBKVF */ { SST(0x4B, 0x00, SS_RDEF, "Data phase error") }, /* DT PWROMAEBK */ { SST(0x4B, 0x01, SS_RDEF, /* XXX TBD */ "Invalid target port transfer tag received") }, /* DT PWROMAEBK */ { SST(0x4B, 0x02, SS_RDEF, /* XXX TBD */ "Too much write data") }, /* DT PWROMAEBK */ { SST(0x4B, 0x03, SS_RDEF, /* XXX TBD */ "ACK/NAK timeout") }, /* DT PWROMAEBK */ { SST(0x4B, 0x04, SS_RDEF, /* XXX TBD */ "NAK received") }, /* DT PWROMAEBK */ { SST(0x4B, 0x05, SS_RDEF, /* XXX TBD */ "Data offset error") }, /* DT PWROMAEBK */ { SST(0x4B, 0x06, SS_RDEF, /* XXX TBD */ "Initiator response timeout") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x07, SS_RDEF, /* XXX TBD */ "Connection lost") }, /* DT PWROMAEBK F 
*/ { SST(0x4B, 0x08, SS_RDEF, /* XXX TBD */ "Data-in buffer overflow - data buffer size") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x09, SS_RDEF, /* XXX TBD */ "Data-in buffer overflow - data buffer descriptor area") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0A, SS_RDEF, /* XXX TBD */ "Data-in buffer error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0B, SS_RDEF, /* XXX TBD */ "Data-out buffer overflow - data buffer size") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0C, SS_RDEF, /* XXX TBD */ "Data-out buffer overflow - data buffer descriptor area") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0D, SS_RDEF, /* XXX TBD */ "Data-out buffer error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0E, SS_RDEF, /* XXX TBD */ "PCIe fabric error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0F, SS_RDEF, /* XXX TBD */ "PCIe completion timeout") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x10, SS_RDEF, /* XXX TBD */ "PCIe completer abort") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x11, SS_RDEF, /* XXX TBD */ "PCIe poisoned TLP received") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x12, SS_RDEF, /* XXX TBD */ "PCIe ECRC check failed") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x13, SS_RDEF, /* XXX TBD */ "PCIe unsupported request") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x14, SS_RDEF, /* XXX TBD */ "PCIe ACS violation") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x15, SS_RDEF, /* XXX TBD */ "PCIe TLP prefix blocked") }, /* DTLPWROMAEBKVF */ { SST(0x4C, 0x00, SS_RDEF, "Logical unit failed self-configuration") }, /* DTLPWROMAEBKVF */ { SST(0x4D, 0x00, SS_RDEF, "Tagged overlapped commands: ASCQ = Queue tag ID") }, /* DTLPWROMAEBKVF */ { SST(0x4D, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00->0xFF */ /* DTLPWROMAEBKVF */ { SST(0x4E, 0x00, SS_RDEF, "Overlapped commands attempted") }, /* T */ { SST(0x50, 0x00, SS_RDEF, "Write append error") }, /* T */ { SST(0x50, 0x01, SS_RDEF, "Write append position error") }, /* T */ { SST(0x50, 0x02, SS_RDEF, "Position error related to timing") }, /* T RO */ { SST(0x51, 0x00, SS_RDEF, "Erase failure") }, /* R */ { SST(0x51, 0x01, SS_RDEF, /* XXX TBD */ "Erase failure - incomplete erase operation detected") }, /* T */ { SST(0x52, 0x00, SS_RDEF, "Cartridge fault") }, /* DTL WROM BK */ { SST(0x53, 0x00, SS_RDEF, "Media load or eject failed") }, /* T */ { SST(0x53, 0x01, SS_RDEF, "Unload tape failure") }, /* DT WROM BK */ { SST(0x53, 0x02, SS_RDEF, "Medium removal prevented") }, /* M */ { SST(0x53, 0x03, SS_RDEF, /* XXX TBD */ "Medium removal prevented by data transfer element") }, /* T */ { SST(0x53, 0x04, SS_RDEF, /* XXX TBD */ "Medium thread or unthread failure") }, /* M */ { SST(0x53, 0x05, SS_RDEF, /* XXX TBD */ "Volume identifier invalid") }, /* T */ { SST(0x53, 0x06, SS_RDEF, /* XXX TBD */ "Volume identifier missing") }, /* M */ { SST(0x53, 0x07, SS_RDEF, /* XXX TBD */ "Duplicate volume identifier") }, /* M */ { SST(0x53, 0x08, SS_RDEF, /* XXX TBD */ "Element status unknown") }, /* M */ { SST(0x53, 0x09, SS_RDEF, /* XXX TBD */ "Data transfer device error - load failed") }, /* M */ { SST(0x53, 0x0A, SS_RDEF, /* XXX TBD */ "Data transfer device error - unload failed") }, /* M */ { SST(0x53, 0x0B, SS_RDEF, /* XXX TBD */ "Data transfer device error - unload missing") }, /* M */ { SST(0x53, 0x0C, SS_RDEF, /* XXX TBD */ "Data transfer device error - eject failed") }, /* M */ { SST(0x53, 0x0D, SS_RDEF, /* XXX TBD */ "Data transfer device error - library communication failed") }, /* P */ { SST(0x54, 0x00, SS_RDEF, "SCSI to host system interface failure") }, /* P */ { SST(0x55, 0x00, SS_RDEF, "System resource failure") }, /* D O BK */ { SST(0x55, 
0x01, SS_FATAL | ENOSPC, "System buffer full") }, /* DTLPWROMAE K */ { SST(0x55, 0x02, SS_RDEF, /* XXX TBD */ "Insufficient reservation resources") }, /* DTLPWROMAE K */ { SST(0x55, 0x03, SS_RDEF, /* XXX TBD */ "Insufficient resources") }, /* DTLPWROMAE K */ { SST(0x55, 0x04, SS_RDEF, /* XXX TBD */ "Insufficient registration resources") }, /* DT PWROMAEBK */ { SST(0x55, 0x05, SS_RDEF, /* XXX TBD */ "Insufficient access control resources") }, /* DT WROM B */ { SST(0x55, 0x06, SS_RDEF, /* XXX TBD */ "Auxiliary memory out of space") }, /* F */ { SST(0x55, 0x07, SS_RDEF, /* XXX TBD */ "Quota error") }, /* T */ { SST(0x55, 0x08, SS_RDEF, /* XXX TBD */ "Maximum number of supplemental decryption keys exceeded") }, /* M */ { SST(0x55, 0x09, SS_RDEF, /* XXX TBD */ "Medium auxiliary memory not accessible") }, /* M */ { SST(0x55, 0x0A, SS_RDEF, /* XXX TBD */ "Data currently unavailable") }, /* DTLPWROMAEBKVF */ { SST(0x55, 0x0B, SS_RDEF, /* XXX TBD */ "Insufficient power for operation") }, /* DT P B */ { SST(0x55, 0x0C, SS_RDEF, /* XXX TBD */ "Insufficient resources to create ROD") }, /* DT P B */ { SST(0x55, 0x0D, SS_RDEF, /* XXX TBD */ "Insufficient resources to create ROD token") }, /* D */ { SST(0x55, 0x0E, SS_RDEF, /* XXX TBD */ "Insufficient zone resources") }, /* D */ { SST(0x55, 0x0F, SS_RDEF, /* XXX TBD */ "Insufficient zone resources to complete write") }, /* D */ { SST(0x55, 0x10, SS_RDEF, /* XXX TBD */ "Maximum number of streams open") }, /* R */ { SST(0x57, 0x00, SS_RDEF, "Unable to recover table-of-contents") }, /* O */ { SST(0x58, 0x00, SS_RDEF, "Generation does not exist") }, /* O */ { SST(0x59, 0x00, SS_RDEF, "Updated block read") }, /* DTLPWRO BK */ { SST(0x5A, 0x00, SS_RDEF, "Operator request or state change input") }, /* DT WROM BK */ { SST(0x5A, 0x01, SS_RDEF, "Operator medium removal request") }, /* DT WRO A BK */ { SST(0x5A, 0x02, SS_RDEF, "Operator selected write protect") }, /* DT WRO A BK */ { SST(0x5A, 0x03, SS_RDEF, "Operator selected write permit") }, /* DTLPWROM K */ { SST(0x5B, 0x00, SS_RDEF, "Log exception") }, /* DTLPWROM K */ { SST(0x5B, 0x01, SS_RDEF, "Threshold condition met") }, /* DTLPWROM K */ { SST(0x5B, 0x02, SS_RDEF, "Log counter at maximum") }, /* DTLPWROM K */ { SST(0x5B, 0x03, SS_RDEF, "Log list codes exhausted") }, /* D O */ { SST(0x5C, 0x00, SS_RDEF, "RPL status change") }, /* D O */ { SST(0x5C, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Spindles synchronized") }, /* D O */ { SST(0x5C, 0x02, SS_RDEF, "Spindles not synchronized") }, /* DTLPWROMAEBKVF */ { SST(0x5D, 0x00, SS_RDEF, "Failure prediction threshold exceeded") }, /* R B */ { SST(0x5D, 0x01, SS_RDEF, /* XXX TBD */ "Media failure prediction threshold exceeded") }, /* R */ { SST(0x5D, 0x02, SS_RDEF, /* XXX TBD */ "Logical unit failure prediction threshold exceeded") }, /* R */ { SST(0x5D, 0x03, SS_RDEF, /* XXX TBD */ "Spare area exhaustion prediction threshold exceeded") }, /* D B */ { SST(0x5D, 0x10, SS_RDEF, /* XXX TBD */ "Hardware impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x11, SS_RDEF, /* XXX TBD */ "Hardware impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x12, SS_RDEF, /* XXX TBD */ "Hardware impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x13, SS_RDEF, /* XXX TBD */ "Hardware impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x14, SS_RDEF, /* XXX TBD */ "Hardware impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x15, SS_RDEF, /* XXX TBD */ "Hardware impending failure access times too 
high") }, /* D B */ { SST(0x5D, 0x16, SS_RDEF, /* XXX TBD */ "Hardware impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x17, SS_RDEF, /* XXX TBD */ "Hardware impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x18, SS_RDEF, /* XXX TBD */ "Hardware impending failure controller detected") }, /* D B */ { SST(0x5D, 0x19, SS_RDEF, /* XXX TBD */ "Hardware impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x1A, SS_RDEF, /* XXX TBD */ "Hardware impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x1B, SS_RDEF, /* XXX TBD */ "Hardware impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x1C, SS_RDEF, /* XXX TBD */ "Hardware impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x20, SS_RDEF, /* XXX TBD */ "Controller impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x21, SS_RDEF, /* XXX TBD */ "Controller impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x22, SS_RDEF, /* XXX TBD */ "Controller impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x23, SS_RDEF, /* XXX TBD */ "Controller impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x24, SS_RDEF, /* XXX TBD */ "Controller impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x25, SS_RDEF, /* XXX TBD */ "Controller impending failure access times too high") }, /* D B */ { SST(0x5D, 0x26, SS_RDEF, /* XXX TBD */ "Controller impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x27, SS_RDEF, /* XXX TBD */ "Controller impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x28, SS_RDEF, /* XXX TBD */ "Controller impending failure controller detected") }, /* D B */ { SST(0x5D, 0x29, SS_RDEF, /* XXX TBD */ "Controller impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x2A, SS_RDEF, /* XXX TBD */ "Controller impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x2B, SS_RDEF, /* XXX TBD */ "Controller impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x2C, SS_RDEF, /* XXX TBD */ "Controller impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x30, SS_RDEF, /* XXX TBD */ "Data channel impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x31, SS_RDEF, /* XXX TBD */ "Data channel impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x32, SS_RDEF, /* XXX TBD */ "Data channel impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x33, SS_RDEF, /* XXX TBD */ "Data channel impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x34, SS_RDEF, /* XXX TBD */ "Data channel impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x35, SS_RDEF, /* XXX TBD */ "Data channel impending failure access times too high") }, /* D B */ { SST(0x5D, 0x36, SS_RDEF, /* XXX TBD */ "Data channel impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x37, SS_RDEF, /* XXX TBD */ "Data channel impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x38, SS_RDEF, /* XXX TBD */ "Data channel impending failure controller detected") }, /* D B */ { SST(0x5D, 0x39, SS_RDEF, /* XXX TBD */ "Data channel impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x3A, SS_RDEF, /* XXX TBD */ "Data channel impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x3B, SS_RDEF, /* XXX TBD */ "Data channel impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x3C, 
SS_RDEF, /* XXX TBD */ "Data channel impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x40, SS_RDEF, /* XXX TBD */ "Servo impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x41, SS_RDEF, /* XXX TBD */ "Servo impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x42, SS_RDEF, /* XXX TBD */ "Servo impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x43, SS_RDEF, /* XXX TBD */ "Servo impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x44, SS_RDEF, /* XXX TBD */ "Servo impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x45, SS_RDEF, /* XXX TBD */ "Servo impending failure access times too high") }, /* D B */ { SST(0x5D, 0x46, SS_RDEF, /* XXX TBD */ "Servo impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x47, SS_RDEF, /* XXX TBD */ "Servo impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x48, SS_RDEF, /* XXX TBD */ "Servo impending failure controller detected") }, /* D B */ { SST(0x5D, 0x49, SS_RDEF, /* XXX TBD */ "Servo impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x4A, SS_RDEF, /* XXX TBD */ "Servo impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x4B, SS_RDEF, /* XXX TBD */ "Servo impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x4C, SS_RDEF, /* XXX TBD */ "Servo impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x50, SS_RDEF, /* XXX TBD */ "Spindle impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x51, SS_RDEF, /* XXX TBD */ "Spindle impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x52, SS_RDEF, /* XXX TBD */ "Spindle impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x53, SS_RDEF, /* XXX TBD */ "Spindle impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x54, SS_RDEF, /* XXX TBD */ "Spindle impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x55, SS_RDEF, /* XXX TBD */ "Spindle impending failure access times too high") }, /* D B */ { SST(0x5D, 0x56, SS_RDEF, /* XXX TBD */ "Spindle impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x57, SS_RDEF, /* XXX TBD */ "Spindle impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x58, SS_RDEF, /* XXX TBD */ "Spindle impending failure controller detected") }, /* D B */ { SST(0x5D, 0x59, SS_RDEF, /* XXX TBD */ "Spindle impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x5A, SS_RDEF, /* XXX TBD */ "Spindle impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x5B, SS_RDEF, /* XXX TBD */ "Spindle impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x5C, SS_RDEF, /* XXX TBD */ "Spindle impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x60, SS_RDEF, /* XXX TBD */ "Firmware impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x61, SS_RDEF, /* XXX TBD */ "Firmware impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x62, SS_RDEF, /* XXX TBD */ "Firmware impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x63, SS_RDEF, /* XXX TBD */ "Firmware impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x64, SS_RDEF, /* XXX TBD */ "Firmware impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x65, SS_RDEF, /* XXX TBD */ "Firmware impending failure access times too high") }, /* D B */ { SST(0x5D, 0x66, SS_RDEF, /* XXX TBD */ 
"Firmware impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x67, SS_RDEF, /* XXX TBD */ "Firmware impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x68, SS_RDEF, /* XXX TBD */ "Firmware impending failure controller detected") }, /* D B */ { SST(0x5D, 0x69, SS_RDEF, /* XXX TBD */ "Firmware impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x6A, SS_RDEF, /* XXX TBD */ "Firmware impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x6B, SS_RDEF, /* XXX TBD */ "Firmware impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x6C, SS_RDEF, /* XXX TBD */ "Firmware impending failure drive calibration retry count") }, /* DTLPWROMAEBKVF */ { SST(0x5D, 0xFF, SS_RDEF, "Failure prediction threshold exceeded (false)") }, /* DTLPWRO A K */ { SST(0x5E, 0x00, SS_RDEF, "Low power condition on") }, /* DTLPWRO A K */ { SST(0x5E, 0x01, SS_RDEF, "Idle condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x02, SS_RDEF, "Standby condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x03, SS_RDEF, "Idle condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x04, SS_RDEF, "Standby condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x05, SS_RDEF, "Idle-B condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x06, SS_RDEF, "Idle-B condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x07, SS_RDEF, "Idle-C condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x08, SS_RDEF, "Idle-C condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x09, SS_RDEF, "Standby-Y condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x0A, SS_RDEF, "Standby-Y condition activated by command") }, /* B */ { SST(0x5E, 0x41, SS_RDEF, /* XXX TBD */ "Power state change to active") }, /* B */ { SST(0x5E, 0x42, SS_RDEF, /* XXX TBD */ "Power state change to idle") }, /* B */ { SST(0x5E, 0x43, SS_RDEF, /* XXX TBD */ "Power state change to standby") }, /* B */ { SST(0x5E, 0x45, SS_RDEF, /* XXX TBD */ "Power state change to sleep") }, /* BK */ { SST(0x5E, 0x47, SS_RDEF, /* XXX TBD */ "Power state change to device control") }, /* */ { SST(0x60, 0x00, SS_RDEF, "Lamp failure") }, /* */ { SST(0x61, 0x00, SS_RDEF, "Video acquisition error") }, /* */ { SST(0x61, 0x01, SS_RDEF, "Unable to acquire video") }, /* */ { SST(0x61, 0x02, SS_RDEF, "Out of focus") }, /* */ { SST(0x62, 0x00, SS_RDEF, "Scan head positioning error") }, /* R */ { SST(0x63, 0x00, SS_RDEF, "End of user area encountered on this track") }, /* R */ { SST(0x63, 0x01, SS_FATAL | ENOSPC, "Packet does not fit in available space") }, /* R */ { SST(0x64, 0x00, SS_FATAL | ENXIO, "Illegal mode for this track") }, /* R */ { SST(0x64, 0x01, SS_RDEF, "Invalid packet size") }, /* DTLPWROMAEBKVF */ { SST(0x65, 0x00, SS_RDEF, "Voltage fault") }, /* */ { SST(0x66, 0x00, SS_RDEF, "Automatic document feeder cover up") }, /* */ { SST(0x66, 0x01, SS_RDEF, "Automatic document feeder lift up") }, /* */ { SST(0x66, 0x02, SS_RDEF, "Document jam in automatic document feeder") }, /* */ { SST(0x66, 0x03, SS_RDEF, "Document miss feed automatic in document feeder") }, /* A */ { SST(0x67, 0x00, SS_RDEF, "Configuration failure") }, /* A */ { SST(0x67, 0x01, SS_RDEF, "Configuration of incapable logical units failed") }, /* A */ { SST(0x67, 0x02, SS_RDEF, "Add logical unit failed") }, /* A */ { SST(0x67, 0x03, SS_RDEF, "Modification of logical unit failed") }, /* A */ { SST(0x67, 0x04, SS_RDEF, "Exchange of logical unit failed") }, /* A */ { 
SST(0x67, 0x05, SS_RDEF, "Remove of logical unit failed") }, /* A */ { SST(0x67, 0x06, SS_RDEF, "Attachment of logical unit failed") }, /* A */ { SST(0x67, 0x07, SS_RDEF, "Creation of logical unit failed") }, /* A */ { SST(0x67, 0x08, SS_RDEF, /* XXX TBD */ "Assign failure occurred") }, /* A */ { SST(0x67, 0x09, SS_RDEF, /* XXX TBD */ "Multiply assigned logical unit") }, /* DTLPWROMAEBKVF */ { SST(0x67, 0x0A, SS_RDEF, /* XXX TBD */ "Set target port groups command failed") }, /* DT B */ { SST(0x67, 0x0B, SS_RDEF, /* XXX TBD */ "ATA device feature not enabled") }, /* A */ { SST(0x68, 0x00, SS_RDEF, "Logical unit not configured") }, /* D */ { SST(0x68, 0x01, SS_RDEF, "Subsidiary logical unit not configured") }, /* A */ { SST(0x69, 0x00, SS_RDEF, "Data loss on logical unit") }, /* A */ { SST(0x69, 0x01, SS_RDEF, "Multiple logical unit failures") }, /* A */ { SST(0x69, 0x02, SS_RDEF, "Parity/data mismatch") }, /* A */ { SST(0x6A, 0x00, SS_RDEF, "Informational, refer to log") }, /* A */ { SST(0x6B, 0x00, SS_RDEF, "State change has occurred") }, /* A */ { SST(0x6B, 0x01, SS_RDEF, "Redundancy level got better") }, /* A */ { SST(0x6B, 0x02, SS_RDEF, "Redundancy level got worse") }, /* A */ { SST(0x6C, 0x00, SS_RDEF, "Rebuild failure occurred") }, /* A */ { SST(0x6D, 0x00, SS_RDEF, "Recalculate failure occurred") }, /* A */ { SST(0x6E, 0x00, SS_RDEF, "Command to logical unit failed") }, /* R */ { SST(0x6F, 0x00, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - authentication failure") }, /* R */ { SST(0x6F, 0x01, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - key not present") }, /* R */ { SST(0x6F, 0x02, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - key not established") }, /* R */ { SST(0x6F, 0x03, SS_RDEF, /* XXX TBD */ "Read of scrambled sector without authentication") }, /* R */ { SST(0x6F, 0x04, SS_RDEF, /* XXX TBD */ "Media region code is mismatched to logical unit region") }, /* R */ { SST(0x6F, 0x05, SS_RDEF, /* XXX TBD */ "Drive region must be permanent/region reset count error") }, /* R */ { SST(0x6F, 0x06, SS_RDEF, /* XXX TBD */ "Insufficient block count for binding NONCE recording") }, /* R */ { SST(0x6F, 0x07, SS_RDEF, /* XXX TBD */ "Conflict in binding NONCE recording") }, /* T */ { SST(0x70, 0x00, SS_RDEF, "Decompression exception short: ASCQ = Algorithm ID") }, /* T */ { SST(0x70, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00 -> 0xFF */ /* T */ { SST(0x71, 0x00, SS_RDEF, "Decompression exception long: ASCQ = Algorithm ID") }, /* T */ { SST(0x71, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00 -> 0xFF */ /* R */ { SST(0x72, 0x00, SS_RDEF, "Session fixation error") }, /* R */ { SST(0x72, 0x01, SS_RDEF, "Session fixation error writing lead-in") }, /* R */ { SST(0x72, 0x02, SS_RDEF, "Session fixation error writing lead-out") }, /* R */ { SST(0x72, 0x03, SS_RDEF, "Session fixation error - incomplete track in session") }, /* R */ { SST(0x72, 0x04, SS_RDEF, "Empty or partially written reserved track") }, /* R */ { SST(0x72, 0x05, SS_RDEF, /* XXX TBD */ "No more track reservations allowed") }, /* R */ { SST(0x72, 0x06, SS_RDEF, /* XXX TBD */ "RMZ extension is not allowed") }, /* R */ { SST(0x72, 0x07, SS_RDEF, /* XXX TBD */ "No more test zone extensions are allowed") }, /* R */ { SST(0x73, 0x00, SS_RDEF, "CD control error") }, /* R */ { SST(0x73, 0x01, SS_RDEF, "Power calibration area almost full") }, /* R */ { SST(0x73, 0x02, SS_FATAL | ENOSPC, "Power calibration area is full") }, /* R */ { SST(0x73, 0x03, SS_RDEF, "Power calibration 
area error") }, /* R */ { SST(0x73, 0x04, SS_RDEF, "Program memory area update failure") }, /* R */ { SST(0x73, 0x05, SS_RDEF, "Program memory area is full") }, /* R */ { SST(0x73, 0x06, SS_RDEF, /* XXX TBD */ "RMA/PMA is almost full") }, /* R */ { SST(0x73, 0x10, SS_RDEF, /* XXX TBD */ "Current power calibration area almost full") }, /* R */ { SST(0x73, 0x11, SS_RDEF, /* XXX TBD */ "Current power calibration area is full") }, /* R */ { SST(0x73, 0x17, SS_RDEF, /* XXX TBD */ "RDZ is full") }, /* T */ { SST(0x74, 0x00, SS_RDEF, /* XXX TBD */ "Security error") }, /* T */ { SST(0x74, 0x01, SS_RDEF, /* XXX TBD */ "Unable to decrypt data") }, /* T */ { SST(0x74, 0x02, SS_RDEF, /* XXX TBD */ "Unencrypted data encountered while decrypting") }, /* T */ { SST(0x74, 0x03, SS_RDEF, /* XXX TBD */ "Incorrect data encryption key") }, /* T */ { SST(0x74, 0x04, SS_RDEF, /* XXX TBD */ "Cryptographic integrity validation failed") }, /* T */ { SST(0x74, 0x05, SS_RDEF, /* XXX TBD */ "Error decrypting data") }, /* T */ { SST(0x74, 0x06, SS_RDEF, /* XXX TBD */ "Unknown signature verification key") }, /* T */ { SST(0x74, 0x07, SS_RDEF, /* XXX TBD */ "Encryption parameters not useable") }, /* DT R M E VF */ { SST(0x74, 0x08, SS_RDEF, /* XXX TBD */ "Digital signature validation failure") }, /* T */ { SST(0x74, 0x09, SS_RDEF, /* XXX TBD */ "Encryption mode mismatch on read") }, /* T */ { SST(0x74, 0x0A, SS_RDEF, /* XXX TBD */ "Encrypted block not raw read enabled") }, /* T */ { SST(0x74, 0x0B, SS_RDEF, /* XXX TBD */ "Incorrect encryption parameters") }, /* DT R MAEBKV */ { SST(0x74, 0x0C, SS_RDEF, /* XXX TBD */ "Unable to decrypt parameter list") }, /* T */ { SST(0x74, 0x0D, SS_RDEF, /* XXX TBD */ "Encryption algorithm disabled") }, /* DT R MAEBKV */ { SST(0x74, 0x10, SS_RDEF, /* XXX TBD */ "SA creation parameter value invalid") }, /* DT R MAEBKV */ { SST(0x74, 0x11, SS_RDEF, /* XXX TBD */ "SA creation parameter value rejected") }, /* DT R MAEBKV */ { SST(0x74, 0x12, SS_RDEF, /* XXX TBD */ "Invalid SA usage") }, /* T */ { SST(0x74, 0x21, SS_RDEF, /* XXX TBD */ "Data encryption configuration prevented") }, /* DT R MAEBKV */ { SST(0x74, 0x30, SS_RDEF, /* XXX TBD */ "SA creation parameter not supported") }, /* DT R MAEBKV */ { SST(0x74, 0x40, SS_RDEF, /* XXX TBD */ "Authentication failed") }, /* V */ { SST(0x74, 0x61, SS_RDEF, /* XXX TBD */ "External data encryption key manager access error") }, /* V */ { SST(0x74, 0x62, SS_RDEF, /* XXX TBD */ "External data encryption key manager error") }, /* V */ { SST(0x74, 0x63, SS_RDEF, /* XXX TBD */ "External data encryption key not found") }, /* V */ { SST(0x74, 0x64, SS_RDEF, /* XXX TBD */ "External data encryption request not authorized") }, /* T */ { SST(0x74, 0x6E, SS_RDEF, /* XXX TBD */ "External data encryption control timeout") }, /* T */ { SST(0x74, 0x6F, SS_RDEF, /* XXX TBD */ "External data encryption control error") }, /* DT R M E V */ { SST(0x74, 0x71, SS_RDEF, /* XXX TBD */ "Logical unit access not authorized") }, /* D */ { SST(0x74, 0x79, SS_RDEF, /* XXX TBD */ "Security conflict in translated device") } }; const int asc_table_size = sizeof(asc_table)/sizeof(asc_table[0]); struct asc_key { int asc; int ascq; }; static int ascentrycomp(const void *key, const void *member) { int asc; int ascq; const struct asc_table_entry *table_entry; asc = ((const struct asc_key *)key)->asc; ascq = ((const struct asc_key *)key)->ascq; table_entry = (const struct asc_table_entry *)member; if (asc >= table_entry->asc) { if (asc > table_entry->asc) return (1); if (ascq <= 
table_entry->ascq) { /* Check for ranges */ if (ascq == table_entry->ascq || ((table_entry->action & SSQ_RANGE) != 0 && ascq >= (table_entry - 1)->ascq)) return (0); return (-1); } return (1); } return (-1); } static int senseentrycomp(const void *key, const void *member) { int sense_key; const struct sense_key_table_entry *table_entry; sense_key = *((const int *)key); table_entry = (const struct sense_key_table_entry *)member; if (sense_key >= table_entry->sense_key) { if (sense_key == table_entry->sense_key) return (0); return (1); } return (-1); } static void fetchtableentries(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const struct sense_key_table_entry **sense_entry, const struct asc_table_entry **asc_entry) { caddr_t match; const struct asc_table_entry *asc_tables[2]; const struct sense_key_table_entry *sense_tables[2]; struct asc_key asc_ascq; size_t asc_tables_size[2]; size_t sense_tables_size[2]; int num_asc_tables; int num_sense_tables; int i; /* Default to failure */ *sense_entry = NULL; *asc_entry = NULL; match = NULL; if (inq_data != NULL) match = cam_quirkmatch((caddr_t)inq_data, (caddr_t)sense_quirk_table, sense_quirk_table_size, sizeof(*sense_quirk_table), scsi_inquiry_match); if (match != NULL) { struct scsi_sense_quirk_entry *quirk; quirk = (struct scsi_sense_quirk_entry *)match; asc_tables[0] = quirk->asc_info; asc_tables_size[0] = quirk->num_ascs; asc_tables[1] = asc_table; asc_tables_size[1] = asc_table_size; num_asc_tables = 2; sense_tables[0] = quirk->sense_key_info; sense_tables_size[0] = quirk->num_sense_keys; sense_tables[1] = sense_key_table; sense_tables_size[1] = sense_key_table_size; num_sense_tables = 2; } else { asc_tables[0] = asc_table; asc_tables_size[0] = asc_table_size; num_asc_tables = 1; sense_tables[0] = sense_key_table; sense_tables_size[0] = sense_key_table_size; num_sense_tables = 1; } asc_ascq.asc = asc; asc_ascq.ascq = ascq; for (i = 0; i < num_asc_tables; i++) { void *found_entry; found_entry = bsearch(&asc_ascq, asc_tables[i], asc_tables_size[i], sizeof(**asc_tables), ascentrycomp); if (found_entry) { *asc_entry = (struct asc_table_entry *)found_entry; break; } } for (i = 0; i < num_sense_tables; i++) { void *found_entry; found_entry = bsearch(&sense_key, sense_tables[i], sense_tables_size[i], sizeof(**sense_tables), senseentrycomp); if (found_entry) { *sense_entry = (struct sense_key_table_entry *)found_entry; break; } } } void scsi_sense_desc(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const char **sense_key_desc, const char **asc_desc) { const struct asc_table_entry *asc_entry; const struct sense_key_table_entry *sense_entry; fetchtableentries(sense_key, asc, ascq, inq_data, &sense_entry, &asc_entry); if (sense_entry != NULL) *sense_key_desc = sense_entry->desc; else *sense_key_desc = "Invalid Sense Key"; if (asc_entry != NULL) *asc_desc = asc_entry->desc; else if (asc >= 0x80 && asc <= 0xff) *asc_desc = "Vendor Specific ASC"; else if (ascq >= 0x80 && ascq <= 0xff) *asc_desc = "Vendor Specific ASCQ"; else *asc_desc = "Reserved ASC/ASCQ pair"; } /* * Given sense and device type information, return the appropriate action. * If we do not understand the specific error as identified by the ASC/ASCQ * pair, fall back on the more generic actions derived from the sense key. 
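 *
 * For example, an ASC/ASCQ pair such as 0x3A,0x00 ("Medium not
 * present") has its own entry in asc_table above and supplies its own
 * action; a pair with no entry, say a vendor-specific 0x80,0x00,
 * instead inherits the action of its sense key's entry in
 * sense_key_table.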
*/ scsi_sense_action scsi_error_action(struct ccb_scsiio *csio, struct scsi_inquiry_data *inq_data, u_int32_t sense_flags) { const struct asc_table_entry *asc_entry; const struct sense_key_table_entry *sense_entry; int error_code, sense_key, asc, ascq; scsi_sense_action action; if (!scsi_extract_sense_ccb((union ccb *)csio, &error_code, &sense_key, &asc, &ascq)) { action = SS_RETRY | SSQ_DECREMENT_COUNT | SSQ_PRINT_SENSE | EIO; } else if ((error_code == SSD_DEFERRED_ERROR) || (error_code == SSD_DESC_DEFERRED_ERROR)) { /* * XXX dufault@FreeBSD.org * This error doesn't relate to the command associated * with this request sense. A deferred error is an error * for a command that has already returned GOOD status * (see SCSI2 8.2.14.2). * * By my reading of that section, it looks like the current * command has been cancelled, we should now clean things up * (hopefully recovering any lost data) and then retry the * current command. There are two easy choices, both wrong: * * 1. Drop through (like we had been doing), thus treating * this as if the error were for the current command and * return and stop the current command. * * 2. Issue a retry (like I made it do) thus hopefully * recovering the current transfer, and ignoring the * fact that we've dropped a command. * * These should probably be handled in a device specific * sense handler or punted back up to a user mode daemon */ action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE; } else { fetchtableentries(sense_key, asc, ascq, inq_data, &sense_entry, &asc_entry); /* * Override the 'No additional Sense' entry (0,0) * with the error action of the sense key. */ if (asc_entry != NULL && (asc != 0 || ascq != 0)) action = asc_entry->action; else if (sense_entry != NULL) action = sense_entry->action; else action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE; if (sense_key == SSD_KEY_RECOVERED_ERROR) { /* * The action succeeded but the device wants * the user to know that some recovery action * was required. */ action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK); action |= SS_NOP|SSQ_PRINT_SENSE; } else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) { if ((sense_flags & SF_QUIET_IR) != 0) action &= ~SSQ_PRINT_SENSE; } else if (sense_key == SSD_KEY_UNIT_ATTENTION) { if ((sense_flags & SF_RETRY_UA) != 0 && (action & SS_MASK) == SS_FAIL) { action &= ~(SS_MASK|SSQ_MASK); action |= SS_RETRY|SSQ_DECREMENT_COUNT| SSQ_PRINT_SENSE; } action |= SSQ_UA; } } if ((action & SS_MASK) >= SS_START && (sense_flags & SF_NO_RECOVERY)) { action &= ~SS_MASK; action |= SS_FAIL; } else if ((action & SS_MASK) == SS_RETRY && (sense_flags & SF_NO_RETRY)) { action &= ~SS_MASK; action |= SS_FAIL; } if ((sense_flags & SF_PRINT_ALWAYS) != 0) action |= SSQ_PRINT_SENSE; else if ((sense_flags & SF_NO_PRINT) != 0) action &= ~SSQ_PRINT_SENSE; return (action); } char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len) { + struct sbuf sb; + int error; + + if (len == 0) + return (""); + + sbuf_new(&sb, cdb_string, len, SBUF_FIXEDLEN); + + scsi_cdb_sbuf(cdb_ptr, &sb); + + /* ENOMEM just means that the fixed buffer is full, OK to ignore */ + error = sbuf_finish(&sb); + if (error != 0 && error != ENOMEM) + return (""); + + return(sbuf_data(&sb)); +} + +void +scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb) +{ u_int8_t cdb_len; int i; if (cdb_ptr == NULL) - return(""); + return; - /* Silence warnings */ - cdb_len = 0; - /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. The next 5 bits * are the command code. 
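	 * For example, opcode 0x28 (READ(10)) has group code 1
	 * (0x28 >> 5 == 1) and is therefore a ten byte command.
	 *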
* Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((*cdb_ptr >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* in this case, just print out the opcode */ cdb_len = 1; break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } - *cdb_string = '\0'; + for (i = 0; i < cdb_len; i++) - snprintf(cdb_string + strlen(cdb_string), - len - strlen(cdb_string), "%02hhx ", cdb_ptr[i]); + sbuf_printf(sb, "%02hhx ", cdb_ptr[i]); - return(cdb_string); + return; } const char * scsi_status_string(struct ccb_scsiio *csio) { switch(csio->scsi_status) { case SCSI_STATUS_OK: return("OK"); case SCSI_STATUS_CHECK_COND: return("Check Condition"); case SCSI_STATUS_BUSY: return("Busy"); case SCSI_STATUS_INTERMED: return("Intermediate"); case SCSI_STATUS_INTERMED_COND_MET: return("Intermediate-Condition Met"); case SCSI_STATUS_RESERV_CONFLICT: return("Reservation Conflict"); case SCSI_STATUS_CMD_TERMINATED: return("Command Terminated"); case SCSI_STATUS_QUEUE_FULL: return("Queue Full"); case SCSI_STATUS_ACA_ACTIVE: return("ACA Active"); case SCSI_STATUS_TASK_ABORTED: return("Task Aborted"); default: { static char unkstr[64]; snprintf(unkstr, sizeof(unkstr), "Unknown %#x", csio->scsi_status); return(unkstr); } } } /* * scsi_command_string() returns 0 for success and -1 for failure. */ #ifdef _KERNEL int scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb) #else /* !_KERNEL */ int scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb) #endif /* _KERNEL/!_KERNEL */ { struct scsi_inquiry_data *inq_data; - char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; #ifdef _KERNEL struct ccb_getdev *cgd; #endif /* _KERNEL */ #ifdef _KERNEL if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL) return(-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, csio->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); /* * If the device is unconfigured, just pretend that it is a hard * drive. scsi_op_desc() needs this. */ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE) cgd->inq_data.device = T_DIRECT; inq_data = &cgd->inq_data; #else /* !_KERNEL */ inq_data = &device->inq_data; #endif /* _KERNEL/!_KERNEL */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) { - sbuf_printf(sb, "%s. CDB: %s", - scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data), - scsi_cdb_string(csio->cdb_io.cdb_ptr, cdb_str, - sizeof(cdb_str))); + sbuf_printf(sb, "%s. CDB: ", + scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data)); + scsi_cdb_sbuf(csio->cdb_io.cdb_ptr, sb); } else { - sbuf_printf(sb, "%s. CDB: %s", - scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data), - scsi_cdb_string(csio->cdb_io.cdb_bytes, cdb_str, - sizeof(cdb_str))); + sbuf_printf(sb, "%s. CDB: ", + scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data)); + scsi_cdb_sbuf(csio->cdb_io.cdb_bytes, sb); } #ifdef _KERNEL xpt_free_ccb((union ccb *)cgd); #endif return(0); } /* * Iterate over sense descriptors. Each descriptor is passed into iter_func(). * If iter_func() returns 0, list traversal continues. If iter_func() * returns non-zero, list traversal is stopped. 
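 *
 * A minimal callback sketch (hypothetical, not part of this file) that
 * counts the descriptors in a sense buffer:
 *
 *	static int
 *	count_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len,
 *	    struct scsi_sense_desc_header *header, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 * Returning 0 continues the walk; returning non-zero stops it, as
 * scsi_find_desc_func() below does once it has a match.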
 */
void
scsi_desc_iterate(struct scsi_sense_data_desc *sense, u_int sense_len,
		  int (*iter_func)(struct scsi_sense_data_desc *sense,
				   u_int, struct scsi_sense_desc_header *,
				   void *), void *arg)
{
	int cur_pos;
	int desc_len;

	/*
	 * First make sure the extra length field is present.
	 */
	if (SSD_DESC_IS_PRESENT(sense, sense_len, extra_len) == 0)
		return;

	/*
	 * The length of data actually returned may be different from the
	 * extra_len recorded in the structure.
	 */
	desc_len = sense_len - offsetof(struct scsi_sense_data_desc,
					sense_desc);

	/*
	 * Limit this further by the extra length reported, and the maximum
	 * allowed extra length.
	 */
	desc_len = MIN(desc_len, MIN(sense->extra_len, SSD_EXTRA_MAX));

	/*
	 * Subtract the size of the header from the descriptor length.
	 * This is to ensure that we have at least the header left, so we
	 * don't have to check that inside the loop.  This can wind up
	 * being a negative value.
	 */
	desc_len -= sizeof(struct scsi_sense_desc_header);

	for (cur_pos = 0; cur_pos < desc_len;) {
		struct scsi_sense_desc_header *header;

		header = (struct scsi_sense_desc_header *)
		    &sense->sense_desc[cur_pos];

		/*
		 * Check to make sure we have the entire descriptor.  We
		 * don't call iter_func() unless we do.
		 *
		 * Note that although cur_pos is at the beginning of the
		 * descriptor, desc_len already has the header length
		 * subtracted.  So the comparison of the length in the
		 * header (which does not include the header itself) to
		 * desc_len - cur_pos is correct.
		 */
		if (header->length > (desc_len - cur_pos))
			break;

		if (iter_func(sense, sense_len, header, arg) != 0)
			break;

		cur_pos += sizeof(*header) + header->length;
	}
}

struct scsi_find_desc_info {
	uint8_t desc_type;
	struct scsi_sense_desc_header *header;
};

static int
scsi_find_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len,
		    struct scsi_sense_desc_header *header, void *arg)
{
	struct scsi_find_desc_info *desc_info;

	desc_info = (struct scsi_find_desc_info *)arg;

	if (header->desc_type == desc_info->desc_type) {
		desc_info->header = header;

		/* We found the descriptor, tell the iterator to stop. */
		return (1);
	} else
		return (0);
}

/*
 * Given a descriptor type, return a pointer to it if it is in the sense
 * data and not truncated.  Avoiding truncated sense data simplifies
 * things significantly for the caller.
 */
uint8_t *
scsi_find_desc(struct scsi_sense_data_desc *sense, u_int sense_len,
	       uint8_t desc_type)
{
	struct scsi_find_desc_info desc_info;

	desc_info.desc_type = desc_type;
	desc_info.header = NULL;

	scsi_desc_iterate(sense, sense_len, scsi_find_desc_func, &desc_info);

	return ((uint8_t *)desc_info.header);
}

/*
 * Fill in SCSI sense data with the specified parameters.  This routine can
 * fill in either fixed or descriptor type sense data.
 */
void
scsi_set_sense_data_va(struct scsi_sense_data *sense_data,
		       scsi_sense_data_type sense_format, int current_error,
		       int sense_key, int asc, int ascq, va_list ap)
{
	int descriptor_sense;
	scsi_sense_elem_type elem_type;

	/*
	 * Determine whether to return fixed or descriptor format sense
	 * data.  If the user specifies SSD_TYPE_NONE for some reason,
	 * they'll just get fixed sense data.
	 */
	if (sense_format == SSD_TYPE_DESC)
		descriptor_sense = 1;
	else
		descriptor_sense = 0;

	/*
	 * Zero the sense data, so that we don't pass back any garbage data
	 * to the user.
	 */
	memset(sense_data, 0, sizeof(*sense_data));

	if (descriptor_sense != 0) {
		struct scsi_sense_data_desc *sense;

		sense = (struct scsi_sense_data_desc *)sense_data;
		/*
		 * The descriptor sense format eliminates the use of the
		 * valid bit.
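		 * (With fixed format sense, below, the error code's
		 * SSD_ERRCODE_VALID bit qualifies the info field; an
		 * information descriptor instead carries its own validity
		 * flag, SSD_INFO_VALID, in byte2.)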
		 */
		if (current_error != 0)
			sense->error_code = SSD_DESC_CURRENT_ERROR;
		else
			sense->error_code = SSD_DESC_DEFERRED_ERROR;

		sense->sense_key = sense_key;
		sense->add_sense_code = asc;
		sense->add_sense_code_qual = ascq;

		/*
		 * Start off with no extra length, since the above data
		 * fits in the standard descriptor sense information.
		 */
		sense->extra_len = 0;
		while ((elem_type = (scsi_sense_elem_type)va_arg(ap,
			scsi_sense_elem_type)) != SSD_ELEM_NONE) {
			int sense_len, len_to_copy;
			uint8_t *data;

			if (elem_type >= SSD_ELEM_MAX) {
				printf("%s: invalid sense type %d\n",
				       __func__, elem_type);
				break;
			}

			sense_len = (int)va_arg(ap, int);
			len_to_copy = MIN(sense_len, SSD_EXTRA_MAX -
					  sense->extra_len);
			data = (uint8_t *)va_arg(ap, uint8_t *);

			/*
			 * We've already consumed the arguments for this one.
			 */
			if (elem_type == SSD_ELEM_SKIP)
				continue;

			switch (elem_type) {
			case SSD_ELEM_DESC: {
				/*
				 * This is a straight descriptor.  All we
				 * need to do is copy the data in.
				 */
				bcopy(data, &sense->sense_desc[
				      sense->extra_len], len_to_copy);
				sense->extra_len += len_to_copy;
				break;
			}
			case SSD_ELEM_SKS: {
				struct scsi_sense_sks sks;

				bzero(&sks, sizeof(sks));
				/*
				 * This is already-formatted sense key
				 * specific data.  We just need to fill out
				 * the header and copy everything in.
				 */
				bcopy(data, &sks.sense_key_spec,
				      MIN(len_to_copy,
				      sizeof(sks.sense_key_spec)));

				sks.desc_type = SSD_DESC_SKS;
				sks.length = sizeof(sks) -
				    offsetof(struct scsi_sense_sks, reserved1);
				bcopy(&sks,
				      &sense->sense_desc[sense->extra_len],
				      sizeof(sks));
				sense->extra_len += sizeof(sks);
				break;
			}
			case SSD_ELEM_INFO:
			case SSD_ELEM_COMMAND: {
				struct scsi_sense_command cmd;
				struct scsi_sense_info info;
				uint8_t *data_dest;
				uint8_t *descriptor;
				int descriptor_size, i, copy_len;

				bzero(&cmd, sizeof(cmd));
				bzero(&info, sizeof(info));

				/*
				 * Command or information data.  They
				 * operate in pretty much the same way.
				 */
				if (elem_type == SSD_ELEM_COMMAND) {
					len_to_copy = MIN(len_to_copy,
					    sizeof(cmd.command_info));
					descriptor = (uint8_t *)&cmd;
					descriptor_size = sizeof(cmd);
					data_dest =
					    (uint8_t *)&cmd.command_info;
					cmd.desc_type = SSD_DESC_COMMAND;
					cmd.length = sizeof(cmd) -
					    offsetof(struct scsi_sense_command,
						     reserved);
				} else {
					len_to_copy = MIN(len_to_copy,
					    sizeof(info.info));
					descriptor = (uint8_t *)&info;
					descriptor_size = sizeof(info);
					data_dest = (uint8_t *)&info.info;
					info.desc_type = SSD_DESC_INFO;
					info.byte2 = SSD_INFO_VALID;
					info.length = sizeof(info) -
					    offsetof(struct scsi_sense_info,
						     byte2);
				}

				/*
				 * Copy this in reverse because the spec
				 * (SPC-4) says that when 4 byte quantities
				 * are stored in this 8 byte field, the
				 * first four bytes shall be 0.
				 *
				 * So we fill the bytes in from the end, and
				 * if we have less than 8 bytes to copy,
				 * the initial, most significant bytes will
				 * be 0.
				 */
				for (i = sense_len - 1; i >= 0 &&
				     len_to_copy > 0; i--, len_to_copy--)
					data_dest[len_to_copy - 1] = data[i];

				/*
				 * This calculation looks much like the
				 * initial len_to_copy calculation, but
				 * we have to do it again here, because
				 * we're looking at a larger amount that
				 * may or may not fit.  It's not only the
				 * data the user passed in, but also the
				 * rest of the descriptor.
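				 * For example, with 4 bytes of user data,
				 * the reversed copy above fills only the low
				 * 4 bytes of the 8-byte field, but copy_len
				 * below must cover the whole descriptor
				 * structure, header included.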
*/ copy_len = MIN(descriptor_size, SSD_EXTRA_MAX - sense->extra_len); bcopy(descriptor, &sense->sense_desc[ sense->extra_len], copy_len); sense->extra_len += copy_len; break; } case SSD_ELEM_FRU: { struct scsi_sense_fru fru; int copy_len; bzero(&fru, sizeof(fru)); fru.desc_type = SSD_DESC_FRU; fru.length = sizeof(fru) - offsetof(struct scsi_sense_fru, reserved); fru.fru = *data; copy_len = MIN(sizeof(fru), SSD_EXTRA_MAX - sense->extra_len); bcopy(&fru, &sense->sense_desc[ sense->extra_len], copy_len); sense->extra_len += copy_len; break; } case SSD_ELEM_STREAM: { struct scsi_sense_stream stream_sense; int copy_len; bzero(&stream_sense, sizeof(stream_sense)); stream_sense.desc_type = SSD_DESC_STREAM; stream_sense.length = sizeof(stream_sense) - offsetof(struct scsi_sense_stream, reserved); stream_sense.byte3 = *data; copy_len = MIN(sizeof(stream_sense), SSD_EXTRA_MAX - sense->extra_len); bcopy(&stream_sense, &sense->sense_desc[ sense->extra_len], copy_len); sense->extra_len += copy_len; break; } default: /* * We shouldn't get here, but if we do, do * nothing. We've already consumed the * arguments above. */ break; } } } else { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (current_error != 0) sense->error_code = SSD_CURRENT_ERROR; else sense->error_code = SSD_DEFERRED_ERROR; sense->flags = sense_key; sense->add_sense_code = asc; sense->add_sense_code_qual = ascq; /* * We've set the ASC and ASCQ, so we have 6 more bytes of * valid data. If we wind up setting any of the other * fields, we'll bump this to 10 extra bytes. */ sense->extra_len = 6; while ((elem_type = (scsi_sense_elem_type)va_arg(ap, scsi_sense_elem_type)) != SSD_ELEM_NONE) { int sense_len, len_to_copy; uint8_t *data; if (elem_type >= SSD_ELEM_MAX) { printf("%s: invalid sense type %d\n", __func__, elem_type); break; } /* * If we get in here, just bump the extra length to * 10 bytes. That will encompass anything we're * going to set here. */ sense->extra_len = 10; sense_len = (int)va_arg(ap, int); data = (uint8_t *)va_arg(ap, uint8_t *); switch (elem_type) { case SSD_ELEM_SKS: /* * The user passed in pre-formatted sense * key specific data. */ bcopy(data, &sense->sense_key_spec[0], MIN(sizeof(sense->sense_key_spec), sense_len)); break; case SSD_ELEM_INFO: case SSD_ELEM_COMMAND: { uint8_t *data_dest; int i; if (elem_type == SSD_ELEM_COMMAND) { data_dest = &sense->cmd_spec_info[0]; len_to_copy = MIN(sense_len, sizeof(sense->cmd_spec_info)); } else { data_dest = &sense->info[0]; len_to_copy = MIN(sense_len, sizeof(sense->info)); /* * We're setting the info field, so * set the valid bit. */ sense->error_code |= SSD_ERRCODE_VALID; } /* * Copy this in reverse so that if we have * less than 4 bytes to fill, the least * significant bytes will be at the end. * If we have more than 4 bytes, only the * least significant bytes will be included. */ for (i = sense_len - 1; i >= 0 && len_to_copy > 0; i--, len_to_copy--) data_dest[len_to_copy - 1] = data[i]; break; } case SSD_ELEM_FRU: sense->fru = *data; break; case SSD_ELEM_STREAM: sense->flags |= *data; break; case SSD_ELEM_DESC: default: /* * If the user passes in descriptor sense, * we can't handle that in fixed format. * So just skip it, and any unknown argument * types. */ break; } } } } void scsi_set_sense_data(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, ...) 
{ va_list ap; va_start(ap, ascq); scsi_set_sense_data_va(sense_data, sense_format, current_error, sense_key, asc, ascq, ap); va_end(ap); } /* * Get sense information for three similar sense data types. */ int scsi_get_sense_info(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t info_type, uint64_t *info, int64_t *signed_info) { scsi_sense_data_type sense_type; if (sense_len == 0) goto bailout; sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; uint8_t *desc; sense = (struct scsi_sense_data_desc *)sense_data; desc = scsi_find_desc(sense, sense_len, info_type); if (desc == NULL) goto bailout; switch (info_type) { case SSD_DESC_INFO: { struct scsi_sense_info *info_desc; info_desc = (struct scsi_sense_info *)desc; *info = scsi_8btou64(info_desc->info); if (signed_info != NULL) *signed_info = *info; break; } case SSD_DESC_COMMAND: { struct scsi_sense_command *cmd_desc; cmd_desc = (struct scsi_sense_command *)desc; *info = scsi_8btou64(cmd_desc->command_info); if (signed_info != NULL) *signed_info = *info; break; } case SSD_DESC_FRU: { struct scsi_sense_fru *fru_desc; fru_desc = (struct scsi_sense_fru *)desc; *info = fru_desc->fru; if (signed_info != NULL) *signed_info = (int8_t)fru_desc->fru; break; } default: goto bailout; break; } break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; switch (info_type) { case SSD_DESC_INFO: { uint32_t info_val; if ((sense->error_code & SSD_ERRCODE_VALID) == 0) goto bailout; if (SSD_FIXED_IS_PRESENT(sense, sense_len, info) == 0) goto bailout; info_val = scsi_4btoul(sense->info); *info = info_val; if (signed_info != NULL) *signed_info = (int32_t)info_val; break; } case SSD_DESC_COMMAND: { uint32_t cmd_val; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, cmd_spec_info) == 0) || (SSD_FIXED_IS_FILLED(sense, cmd_spec_info) == 0)) goto bailout; cmd_val = scsi_4btoul(sense->cmd_spec_info); if (cmd_val == 0) goto bailout; *info = cmd_val; if (signed_info != NULL) *signed_info = (int32_t)cmd_val; break; } case SSD_DESC_FRU: if ((SSD_FIXED_IS_PRESENT(sense, sense_len, fru) == 0) || (SSD_FIXED_IS_FILLED(sense, fru) == 0)) goto bailout; if (sense->fru == 0) goto bailout; *info = sense->fru; if (signed_info != NULL) *signed_info = (int8_t)sense->fru; break; default: goto bailout; break; } break; } default: goto bailout; break; } return (0); bailout: return (1); } int scsi_get_sks(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t *sks) { scsi_sense_data_type sense_type; if (sense_len == 0) goto bailout; sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_sks *desc; sense = (struct scsi_sense_data_desc *)sense_data; desc = (struct scsi_sense_sks *)scsi_find_desc(sense, sense_len, SSD_DESC_SKS); if (desc == NULL) goto bailout; /* * No need to check the SKS valid bit for descriptor sense. * If the descriptor is present, it is valid. 
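		 * (The fixed format case below, by contrast, does check
		 * the SSD_SCS_VALID bit before copying.)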
*/ bcopy(desc->sense_key_spec, sks, sizeof(desc->sense_key_spec)); break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, sense_key_spec)== 0) || (SSD_FIXED_IS_FILLED(sense, sense_key_spec) == 0)) goto bailout; if ((sense->sense_key_spec[0] & SSD_SCS_VALID) == 0) goto bailout; bcopy(sense->sense_key_spec, sks,sizeof(sense->sense_key_spec)); break; } default: goto bailout; break; } return (0); bailout: return (1); } /* * Provide a common interface for fixed and descriptor sense to detect * whether we have block-specific sense information. It is clear by the * presence of the block descriptor in descriptor mode, but we have to * infer from the inquiry data and ILI bit in fixed mode. */ int scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *block_bits) { scsi_sense_data_type sense_type; if (inq_data != NULL) { switch (SID_TYPE(inq_data)) { case T_DIRECT: case T_RBC: break; default: goto bailout; break; } } sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_block *block; sense = (struct scsi_sense_data_desc *)sense_data; block = (struct scsi_sense_block *)scsi_find_desc(sense, sense_len, SSD_DESC_BLOCK); if (block == NULL) goto bailout; *block_bits = block->byte3; break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags) == 0) goto bailout; if ((sense->flags & SSD_ILI) == 0) goto bailout; *block_bits = sense->flags & SSD_ILI; break; } default: goto bailout; break; } return (0); bailout: return (1); } int scsi_get_stream_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *stream_bits) { scsi_sense_data_type sense_type; if (inq_data != NULL) { switch (SID_TYPE(inq_data)) { case T_SEQUENTIAL: break; default: goto bailout; break; } } sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_stream *stream; sense = (struct scsi_sense_data_desc *)sense_data; stream = (struct scsi_sense_stream *)scsi_find_desc(sense, sense_len, SSD_DESC_STREAM); if (stream == NULL) goto bailout; *stream_bits = stream->byte3; break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags) == 0) goto bailout; if ((sense->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK)) == 0) goto bailout; *stream_bits = sense->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK); break; } default: goto bailout; break; } return (0); bailout: return (1); } void scsi_info_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t info) { sbuf_printf(sb, "Info: %#jx", info); } void scsi_command_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t csi) { sbuf_printf(sb, "Command Specific Info: %#jx", csi); } void scsi_progress_sbuf(struct sbuf *sb, uint16_t progress) { sbuf_printf(sb, "Progress: %d%% (%d/%d) complete", (progress * 100) / SSD_SKS_PROGRESS_DENOM, progress, SSD_SKS_PROGRESS_DENOM); } /* * Returns 1 for failure (i.e. SKS isn't valid) and 0 for success. 
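 *
 * A hypothetical worked example, assuming the usual SPC layout of the
 * sense key specific field: for an ILLEGAL REQUEST sense key with SKS
 * bytes {0xc8, 0x00, 0x02} (SKSV and C/D set, BPV set, bit pointer 0,
 * field pointer 2), this emits "Command byte 2 bit 0 is invalid".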
*/ int scsi_sks_sbuf(struct sbuf *sb, int sense_key, uint8_t *sks) { if ((sks[0] & SSD_SKS_VALID) == 0) return (1); switch (sense_key) { case SSD_KEY_ILLEGAL_REQUEST: { struct scsi_sense_sks_field *field; int bad_command; char tmpstr[40]; /*Field Pointer*/ field = (struct scsi_sense_sks_field *)sks; if (field->byte0 & SSD_SKS_FIELD_CMD) bad_command = 1; else bad_command = 0; tmpstr[0] = '\0'; /* Bit pointer is valid */ if (field->byte0 & SSD_SKS_BPV) snprintf(tmpstr, sizeof(tmpstr), "bit %d ", field->byte0 & SSD_SKS_BIT_VALUE); sbuf_printf(sb, "%s byte %d %sis invalid", bad_command ? "Command" : "Data", scsi_2btoul(field->field), tmpstr); break; } case SSD_KEY_UNIT_ATTENTION: { struct scsi_sense_sks_overflow *overflow; overflow = (struct scsi_sense_sks_overflow *)sks; /*UA Condition Queue Overflow*/ sbuf_printf(sb, "Unit Attention Condition Queue %s", (overflow->byte0 & SSD_SKS_OVERFLOW_SET) ? "Overflowed" : "Did Not Overflow??"); break; } case SSD_KEY_RECOVERED_ERROR: case SSD_KEY_HARDWARE_ERROR: case SSD_KEY_MEDIUM_ERROR: { struct scsi_sense_sks_retry *retry; /*Actual Retry Count*/ retry = (struct scsi_sense_sks_retry *)sks; sbuf_printf(sb, "Actual Retry Count: %d", scsi_2btoul(retry->actual_retry_count)); break; } case SSD_KEY_NO_SENSE: case SSD_KEY_NOT_READY: { struct scsi_sense_sks_progress *progress; int progress_val; /*Progress Indication*/ progress = (struct scsi_sense_sks_progress *)sks; progress_val = scsi_2btoul(progress->progress); scsi_progress_sbuf(sb, progress_val); break; } case SSD_KEY_COPY_ABORTED: { struct scsi_sense_sks_segment *segment; char tmpstr[40]; /*Segment Pointer*/ segment = (struct scsi_sense_sks_segment *)sks; tmpstr[0] = '\0'; if (segment->byte0 & SSD_SKS_SEGMENT_BPV) snprintf(tmpstr, sizeof(tmpstr), "bit %d ", segment->byte0 & SSD_SKS_SEGMENT_BITPTR); sbuf_printf(sb, "%s byte %d %sis invalid", (segment->byte0 & SSD_SKS_SEGMENT_SD) ? "Segment" : "Data", scsi_2btoul(segment->field), tmpstr); break; } default: sbuf_printf(sb, "Sense Key Specific: %#x,%#x", sks[0], scsi_2btoul(&sks[1])); break; } return (0); } void scsi_fru_sbuf(struct sbuf *sb, uint64_t fru) { sbuf_printf(sb, "Field Replaceable Unit: %d", (int)fru); } void scsi_stream_sbuf(struct sbuf *sb, uint8_t stream_bits, uint64_t info) { int need_comma; need_comma = 0; /* * XXX KDM this needs more descriptive decoding. */ if (stream_bits & SSD_DESC_STREAM_FM) { sbuf_printf(sb, "Filemark"); need_comma = 1; } if (stream_bits & SSD_DESC_STREAM_EOM) { sbuf_printf(sb, "%sEOM", (need_comma) ? "," : ""); need_comma = 1; } if (stream_bits & SSD_DESC_STREAM_ILI) sbuf_printf(sb, "%sILI", (need_comma) ? 
"," : ""); sbuf_printf(sb, ": Info: %#jx", (uintmax_t) info); } void scsi_block_sbuf(struct sbuf *sb, uint8_t block_bits, uint64_t info) { if (block_bits & SSD_DESC_BLOCK_ILI) sbuf_printf(sb, "ILI: residue %#jx", (uintmax_t) info); } void scsi_sense_info_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_info *info; info = (struct scsi_sense_info *)header; scsi_info_sbuf(sb, cdb, cdb_len, inq_data, scsi_8btou64(info->info)); } void scsi_sense_command_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_command *command; command = (struct scsi_sense_command *)header; scsi_command_sbuf(sb, cdb, cdb_len, inq_data, scsi_8btou64(command->command_info)); } void scsi_sense_sks_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_sks *sks; int error_code, sense_key, asc, ascq; sks = (struct scsi_sense_sks *)header; scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); scsi_sks_sbuf(sb, sense_key, sks->sense_key_spec); } void scsi_sense_fru_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_fru *fru; fru = (struct scsi_sense_fru *)header; scsi_fru_sbuf(sb, (uint64_t)fru->fru); } void scsi_sense_stream_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_stream *stream; uint64_t info; stream = (struct scsi_sense_stream *)header; info = 0; scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, NULL); scsi_stream_sbuf(sb, stream->byte3, info); } void scsi_sense_block_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_block *block; uint64_t info; block = (struct scsi_sense_block *)header; info = 0; scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, NULL); scsi_block_sbuf(sb, block->byte3, info); } void scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_progress *progress; const char *sense_key_desc; const char *asc_desc; int progress_val; progress = (struct scsi_sense_progress *)header; /* * Get descriptions for the sense key, ASC, and ASCQ in the * progress descriptor. These could be different than the values * in the overall sense data. */ scsi_sense_desc(progress->sense_key, progress->add_sense_code, progress->add_sense_code_qual, inq_data, &sense_key_desc, &asc_desc); progress_val = scsi_2btoul(progress->progress); /* * The progress indicator is for the operation described by the * sense key, ASC, and ASCQ in the descriptor. */ sbuf_cat(sb, sense_key_desc); sbuf_printf(sb, " asc:%x,%x (%s): ", progress->add_sense_code, progress->add_sense_code_qual, asc_desc); scsi_progress_sbuf(sb, progress_val); } /* * Generic sense descriptor printing routine. 
This is used when we have * not yet implemented a specific printing routine for this descriptor. */ void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { int i; uint8_t *buf_ptr; sbuf_printf(sb, "Descriptor %#x:", header->desc_type); buf_ptr = (uint8_t *)&header[1]; for (i = 0; i < header->length; i++, buf_ptr++) sbuf_printf(sb, " %02x", *buf_ptr); } /* * Keep this list in numeric order. This speeds the array traversal. */ struct scsi_sense_desc_printer { uint8_t desc_type; /* * The function arguments here are the superset of what is needed * to print out various different descriptors. Command and * information descriptors need inquiry data and command type. * Sense key specific descriptors need the sense key. * * The sense, cdb, and inquiry data arguments may be NULL, but the * information printed may not be fully decoded as a result. */ void (*print_func)(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); } scsi_sense_printers[] = { {SSD_DESC_INFO, scsi_sense_info_sbuf}, {SSD_DESC_COMMAND, scsi_sense_command_sbuf}, {SSD_DESC_SKS, scsi_sense_sks_sbuf}, {SSD_DESC_FRU, scsi_sense_fru_sbuf}, {SSD_DESC_STREAM, scsi_sense_stream_sbuf}, {SSD_DESC_BLOCK, scsi_sense_block_sbuf}, {SSD_DESC_PROGRESS, scsi_sense_progress_sbuf} }; void scsi_sense_desc_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { int i; for (i = 0; i < (sizeof(scsi_sense_printers) / sizeof(scsi_sense_printers[0])); i++) { struct scsi_sense_desc_printer *printer; printer = &scsi_sense_printers[i]; /* * The list is sorted, so quit if we've passed our * descriptor number. */ if (printer->desc_type > header->desc_type) break; if (printer->desc_type != header->desc_type) continue; printer->print_func(sb, sense, sense_len, cdb, cdb_len, inq_data, header); return; } /* * No specific printing routine, so use the generic routine. */ scsi_sense_generic_sbuf(sb, sense, sense_len, cdb, cdb_len, inq_data, header); } scsi_sense_data_type scsi_sense_type(struct scsi_sense_data *sense_data) { switch (sense_data->error_code & SSD_ERRCODE) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: return (SSD_TYPE_DESC); break; case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: return (SSD_TYPE_FIXED); break; default: break; } return (SSD_TYPE_NONE); } struct scsi_print_sense_info { struct sbuf *sb; char *path_str; uint8_t *cdb; int cdb_len; struct scsi_inquiry_data *inq_data; }; static int scsi_print_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len, struct scsi_sense_desc_header *header, void *arg) { struct scsi_print_sense_info *print_info; print_info = (struct scsi_print_sense_info *)arg; switch (header->desc_type) { case SSD_DESC_INFO: case SSD_DESC_FRU: case SSD_DESC_COMMAND: case SSD_DESC_SKS: case SSD_DESC_BLOCK: case SSD_DESC_STREAM: /* * We have already printed these descriptors, if they are * present. 
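	 * Anything else is handed to scsi_sense_desc_sbuf(), which uses
	 * a type-specific printer when one exists (e.g. the progress
	 * descriptor) and falls back to the generic hex dump otherwise.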
*/ break; default: { sbuf_printf(print_info->sb, "%s", print_info->path_str); scsi_sense_desc_sbuf(print_info->sb, (struct scsi_sense_data *)sense, sense_len, print_info->cdb, print_info->cdb_len, print_info->inq_data, header); sbuf_printf(print_info->sb, "\n"); break; } } /* * Tell the iterator that we want to see more descriptors if they * are present. */ return (0); } void scsi_sense_only_sbuf(struct scsi_sense_data *sense, u_int sense_len, struct sbuf *sb, char *path_str, struct scsi_inquiry_data *inq_data, uint8_t *cdb, int cdb_len) { int error_code, sense_key, asc, ascq; sbuf_cat(sb, path_str); scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); sbuf_printf(sb, "SCSI sense: "); switch (error_code) { case SSD_DEFERRED_ERROR: case SSD_DESC_DEFERRED_ERROR: sbuf_printf(sb, "Deferred error: "); /* FALLTHROUGH */ case SSD_CURRENT_ERROR: case SSD_DESC_CURRENT_ERROR: { struct scsi_sense_data_desc *desc_sense; struct scsi_print_sense_info print_info; const char *sense_key_desc; const char *asc_desc; uint8_t sks[3]; uint64_t val; int info_valid; /* * Get descriptions for the sense key, ASC, and ASCQ. If * these aren't present in the sense data (i.e. the sense * data isn't long enough), the -1 values that * scsi_extract_sense_len() returns will yield default * or error descriptions. */ scsi_sense_desc(sense_key, asc, ascq, inq_data, &sense_key_desc, &asc_desc); /* * We first print the sense key and ASC/ASCQ. */ sbuf_cat(sb, sense_key_desc); sbuf_printf(sb, " asc:%x,%x (%s)\n", asc, ascq, asc_desc); /* * Get the info field if it is valid. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &val, NULL) == 0) info_valid = 1; else info_valid = 0; if (info_valid != 0) { uint8_t bits; /* * Determine whether we have any block or stream * device-specific information. */ if (scsi_get_block_info(sense, sense_len, inq_data, &bits) == 0) { sbuf_cat(sb, path_str); scsi_block_sbuf(sb, bits, val); sbuf_printf(sb, "\n"); } else if (scsi_get_stream_info(sense, sense_len, inq_data, &bits) == 0) { sbuf_cat(sb, path_str); scsi_stream_sbuf(sb, bits, val); sbuf_printf(sb, "\n"); } else if (val != 0) { /* * The information field can be valid but 0. * If the block or stream bits aren't set, * and this is 0, it isn't terribly useful * to print it out. */ sbuf_cat(sb, path_str); scsi_info_sbuf(sb, cdb, cdb_len, inq_data, val); sbuf_printf(sb, "\n"); } } /* * Print the FRU. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &val, NULL) == 0) { sbuf_cat(sb, path_str); scsi_fru_sbuf(sb, val); sbuf_printf(sb, "\n"); } /* * Print any command-specific information. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND, &val, NULL) == 0) { sbuf_cat(sb, path_str); scsi_command_sbuf(sb, cdb, cdb_len, inq_data, val); sbuf_printf(sb, "\n"); } /* * Print out any sense-key-specific information. */ if (scsi_get_sks(sense, sense_len, sks) == 0) { sbuf_cat(sb, path_str); scsi_sks_sbuf(sb, sense_key, sks); sbuf_printf(sb, "\n"); } /* * If this is fixed sense, we're done. If we have * descriptor sense, we might have more information * available. */ if (scsi_sense_type(sense) != SSD_TYPE_DESC) break; desc_sense = (struct scsi_sense_data_desc *)sense; print_info.sb = sb; print_info.path_str = path_str; print_info.cdb = cdb; print_info.cdb_len = cdb_len; print_info.inq_data = inq_data; /* * Print any sense descriptors that we have not already printed. 
		 */
		scsi_desc_iterate(desc_sense, sense_len, scsi_print_desc_func,
				  &print_info);
		break;
	}
	case -1:
		/*
		 * scsi_extract_sense_len() sets values to -1 if the
		 * show_errors flag is set and they aren't present in the
		 * sense data.  This means that sense_len is 0.
		 */
		sbuf_printf(sb, "No sense data present\n");
		break;
	default: {
		sbuf_printf(sb, "Error code 0x%x", error_code);
		if (sense->error_code & SSD_ERRCODE_VALID) {
			struct scsi_sense_data_fixed *fixed_sense;

			fixed_sense = (struct scsi_sense_data_fixed *)sense;

			if (SSD_FIXED_IS_PRESENT(fixed_sense, sense_len,
						 info)) {
				uint32_t info;

				info = scsi_4btoul(fixed_sense->info);

				sbuf_printf(sb, " at block no. %d (decimal)",
					    info);
			}
		}
		sbuf_printf(sb, "\n");
		break;
	}
	}
}

/*
 * scsi_sense_sbuf() returns 0 for success and -1 for failure.
 */
#ifdef _KERNEL
int
scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb,
		scsi_sense_string_flags flags)
#else /* !_KERNEL */
int
scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio,
		struct sbuf *sb, scsi_sense_string_flags flags)
#endif /* _KERNEL/!_KERNEL */
{
	struct scsi_sense_data *sense;
	struct scsi_inquiry_data *inq_data;
#ifdef _KERNEL
	struct ccb_getdev *cgd;
#endif /* _KERNEL */
	char path_str[64];
	uint8_t *cdb;

#ifndef _KERNEL
	if (device == NULL)
		return(-1);
#endif /* !_KERNEL */
	if ((csio == NULL) || (sb == NULL))
		return(-1);

	/*
	 * If the CDB is a physical address, we can't deal with it.
	 */
	if ((csio->ccb_h.flags & CAM_CDB_PHYS) != 0)
		flags &= ~SSS_FLAG_PRINT_COMMAND;

#ifdef _KERNEL
	xpt_path_string(csio->ccb_h.path, path_str, sizeof(path_str));
#else /* !_KERNEL */
	cam_path_string(device, path_str, sizeof(path_str));
#endif /* _KERNEL/!_KERNEL */

#ifdef _KERNEL
	if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL)
		return(-1);
	/*
	 * Get the device information.
	 */
	xpt_setup_ccb(&cgd->ccb_h, csio->ccb_h.path, CAM_PRIORITY_NORMAL);
	cgd->ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action((union ccb *)cgd);

	/*
	 * If the device is unconfigured, just pretend that it is a hard
	 * drive.  scsi_op_desc() needs this.
	 */
	if (cgd->ccb_h.status == CAM_DEV_NOT_THERE)
		cgd->inq_data.device = T_DIRECT;
	inq_data = &cgd->inq_data;
#else /* !_KERNEL */
	inq_data = &device->inq_data;
#endif /* _KERNEL/!_KERNEL */

	sense = NULL;

	if (flags & SSS_FLAG_PRINT_COMMAND) {
		sbuf_cat(sb, path_str);
#ifdef _KERNEL
		scsi_command_string(csio, sb);
#else /* !_KERNEL */
		scsi_command_string(device, csio, sb);
#endif /* _KERNEL/!_KERNEL */
		sbuf_printf(sb, "\n");
	}

	/*
	 * If the sense data is a physical pointer, forget it.
	 */
	if (csio->ccb_h.flags & CAM_SENSE_PTR) {
		if (csio->ccb_h.flags & CAM_SENSE_PHYS) {
#ifdef _KERNEL
			xpt_free_ccb((union ccb*)cgd);
#endif /* _KERNEL/!_KERNEL */
			return(-1);
		} else {
			/*
			 * bcopy the pointer to avoid unaligned access
			 * errors on finicky architectures.  We don't
			 * ensure that the sense data is pointer aligned.
			 */
			bcopy(&csio->sense_data, &sense,
			      sizeof(struct scsi_sense_data *));
		}
	} else {
		/*
		 * If the physical sense flag is set, but the sense pointer
		 * is not also set, we assume that the user is an idiot and
		 * return.  (Well, okay, it could be that somehow, the
		 * entire csio is physical, but we would have probably core
		 * dumped on one of the bogus pointer dereferences above
		 * already.)
		 */
*/ if (csio->ccb_h.flags & CAM_SENSE_PHYS) { #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(-1); } else sense = &csio->sense_data; } if (csio->ccb_h.flags & CAM_CDB_POINTER) cdb = csio->cdb_io.cdb_ptr; else cdb = csio->cdb_io.cdb_bytes; scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb, path_str, inq_data, cdb, csio->cdb_len); #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(0); } #ifdef _KERNEL char * scsi_sense_string(struct ccb_scsiio *csio, char *str, int str_len) #else /* !_KERNEL */ char * scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio, char *str, int str_len) #endif /* _KERNEL/!_KERNEL */ { struct sbuf sb; sbuf_new(&sb, str, str_len, 0); #ifdef _KERNEL scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND); #else /* !_KERNEL */ scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND); #endif /* _KERNEL/!_KERNEL */ sbuf_finish(&sb); return(sbuf_data(&sb)); } #ifdef _KERNEL void scsi_sense_print(struct ccb_scsiio *csio) { struct sbuf sb; char str[512]; sbuf_new(&sb, str, sizeof(str), 0); scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #else /* !_KERNEL */ void scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio, FILE *ofile) { struct sbuf sb; char str[512]; if ((device == NULL) || (csio == NULL) || (ofile == NULL)) return; sbuf_new(&sb, str, sizeof(str), 0); scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); fprintf(ofile, "%s", sbuf_data(&sb)); } #endif /* _KERNEL/!_KERNEL */ /* * Extract basic sense information. This is backward-compatible with the * previous implementation. For new implementations, * scsi_extract_sense_len() is recommended. */ void scsi_extract_sense(struct scsi_sense_data *sense_data, int *error_code, int *sense_key, int *asc, int *ascq) { scsi_extract_sense_len(sense_data, sizeof(*sense_data), error_code, sense_key, asc, ascq, /*show_errors*/ 0); } /* * Extract basic sense information from SCSI I/O CCB structure. */ int scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq) { struct scsi_sense_data *sense_data; /* Make sure there are some sense data we can access. */ if (ccb->ccb_h.func_code != XPT_SCSI_IO || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SCSI_STATUS_ERROR || (ccb->csio.scsi_status != SCSI_STATUS_CHECK_COND) || (ccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0 || (ccb->ccb_h.flags & CAM_SENSE_PHYS)) return (0); if (ccb->ccb_h.flags & CAM_SENSE_PTR) bcopy(&ccb->csio.sense_data, &sense_data, sizeof(struct scsi_sense_data *)); else sense_data = &ccb->csio.sense_data; scsi_extract_sense_len(sense_data, ccb->csio.sense_len - ccb->csio.sense_resid, error_code, sense_key, asc, ascq, 1); if (*error_code == -1) return (0); return (1); } /* * Extract basic sense information. If show_errors is set, sense values * will be set to -1 if they are not present. */ void scsi_extract_sense_len(struct scsi_sense_data *sense_data, u_int sense_len, int *error_code, int *sense_key, int *asc, int *ascq, int show_errors) { /* * If we have no length, we have no sense. 
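 *
 * A minimal usage sketch (hypothetical variable names): a caller that
 * only cares whether valid sense arrived can do
 *
 *	int ec, sk, asc, ascq;
 *
 *	scsi_extract_sense_len(sdata, slen, &ec, &sk, &asc, &ascq, 1);
 *	if (ec == -1)
 *		(no sense data was returned)
 *
 * which is how scsi_extract_sense_ccb() above detects missing sense.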
*/ if (sense_len == 0) { if (show_errors == 0) { *error_code = 0; *sense_key = 0; *asc = 0; *ascq = 0; } else { *error_code = -1; *sense_key = -1; *asc = -1; *ascq = -1; } return; } *error_code = sense_data->error_code & SSD_ERRCODE; switch (*error_code) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: { struct scsi_sense_data_desc *sense; sense = (struct scsi_sense_data_desc *)sense_data; if (SSD_DESC_IS_PRESENT(sense, sense_len, sense_key)) *sense_key = sense->sense_key & SSD_KEY; else *sense_key = (show_errors) ? -1 : 0; if (SSD_DESC_IS_PRESENT(sense, sense_len, add_sense_code)) *asc = sense->add_sense_code; else *asc = (show_errors) ? -1 : 0; if (SSD_DESC_IS_PRESENT(sense, sense_len, add_sense_code_qual)) *ascq = sense->add_sense_code_qual; else *ascq = (show_errors) ? -1 : 0; break; } case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: default: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags)) *sense_key = sense->flags & SSD_KEY; else *sense_key = (show_errors) ? -1 : 0; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, add_sense_code)) && (SSD_FIXED_IS_FILLED(sense, add_sense_code))) *asc = sense->add_sense_code; else *asc = (show_errors) ? -1 : 0; if ((SSD_FIXED_IS_PRESENT(sense, sense_len,add_sense_code_qual)) && (SSD_FIXED_IS_FILLED(sense, add_sense_code_qual))) *ascq = sense->add_sense_code_qual; else *ascq = (show_errors) ? -1 : 0; break; } } } int scsi_get_sense_key(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (sense_key); } int scsi_get_asc(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (asc); } int scsi_get_ascq(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (ascq); } /* * This function currently requires at least 36 bytes, or * SHORT_INQUIRY_LENGTH, worth of data to function properly. If this * function needs more or less data in the future, another length should be * defined in scsi_all.h to indicate the minimum amount of data necessary * for this routine to function properly. */ void scsi_print_inquiry(struct scsi_inquiry_data *inq_data) { u_int8_t type; char *dtype, *qtype; char vendor[16], product[48], revision[16], rstr[12]; type = SID_TYPE(inq_data); /* * Figure out basic device type and qualifier. 
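 *
 * The resulting line looks like, for example:
 *
 *	<VENDOR PRODUCT 0001> Fixed Direct Access SPC-3 SCSI device
 *
 * where the trailing qualifier string (qtype) is empty for an
 * ordinary connected LUN.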
 */
	if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) {
		qtype = " (vendor-unique qualifier)";
	} else {
		switch (SID_QUAL(inq_data)) {
		case SID_QUAL_LU_CONNECTED:
			qtype = "";
			break;
		case SID_QUAL_LU_OFFLINE:
			qtype = " (offline)";
			break;
		case SID_QUAL_RSVD:
			qtype = " (reserved qualifier)";
			break;
		default:
		case SID_QUAL_BAD_LU:
			qtype = " (LUN not supported)";
			break;
		}
	}

	switch (type) {
	case T_DIRECT:
		dtype = "Direct Access";
		break;
	case T_SEQUENTIAL:
		dtype = "Sequential Access";
		break;
	case T_PRINTER:
		dtype = "Printer";
		break;
	case T_PROCESSOR:
		dtype = "Processor";
		break;
	case T_WORM:
		dtype = "WORM";
		break;
	case T_CDROM:
		dtype = "CD-ROM";
		break;
	case T_SCANNER:
		dtype = "Scanner";
		break;
	case T_OPTICAL:
		dtype = "Optical";
		break;
	case T_CHANGER:
		dtype = "Changer";
		break;
	case T_COMM:
		dtype = "Communication";
		break;
	case T_STORARRAY:
		dtype = "Storage Array";
		break;
	case T_ENCLOSURE:
		dtype = "Enclosure Services";
		break;
	case T_RBC:
		dtype = "Simplified Direct Access";
		break;
	case T_OCRW:
		dtype = "Optical Card Read/Write";
		break;
	case T_OSD:
		dtype = "Object-Based Storage";
		break;
	case T_ADC:
		dtype = "Automation/Drive Interface";
		break;
	case T_NODEVICE:
		dtype = "Uninstalled";
		break;
	default:
		dtype = "unknown";
		break;
	}

	cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
		   sizeof(vendor));
	cam_strvis(product, inq_data->product, sizeof(inq_data->product),
		   sizeof(product));
	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
		   sizeof(revision));

	if (SID_ANSI_REV(inq_data) == SCSI_REV_0)
		snprintf(rstr, sizeof(rstr), "SCSI");
	else if (SID_ANSI_REV(inq_data) <= SCSI_REV_SPC) {
		snprintf(rstr, sizeof(rstr), "SCSI-%d",
		    SID_ANSI_REV(inq_data));
	} else {
		snprintf(rstr, sizeof(rstr), "SPC-%d SCSI",
		    SID_ANSI_REV(inq_data) - 2);
	}
	printf("<%s %s %s> %s %s %s device%s\n",
	       vendor, product, revision,
	       SID_IS_REMOVABLE(inq_data) ? "Removable" : "Fixed",
	       dtype, rstr, qtype);
}

void
scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data)
{
	char vendor[16], product[48], revision[16];

	cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
		   sizeof(vendor));
	cam_strvis(product, inq_data->product, sizeof(inq_data->product),
		   sizeof(product));
	cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
		   sizeof(revision));

	printf("<%s %s %s>", vendor, product, revision);
}

/*
 * Table of syncrates that don't follow the "divisible by 4"
 * rule.  This table will be expanded in future SCSI specs.
 */
static struct {
	u_int period_factor;
	u_int period;	/* in 100ths of ns */
} scsi_syncrates[] = {
	{ 0x08, 625 },	/* FAST-160 */
	{ 0x09, 1250 },	/* FAST-80 */
	{ 0x0a, 2500 },	/* FAST-40 40MHz */
	{ 0x0b, 3030 },	/* FAST-40 33MHz */
	{ 0x0c, 5000 }	/* FAST-20 */
};

/*
 * Return the frequency in kHz corresponding to the given
 * sync period factor.
 */
u_int
scsi_calc_syncsrate(u_int period_factor)
{
	int i;
	int num_syncrates;

	/*
	 * It's a bug if period is zero, but if it is anyway, don't
	 * die with a divide fault - instead return something which
	 * 'approximates' async
	 */
	if (period_factor == 0) {
		return (3300);
	}

	num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
	/* See if the period is in the "exception" table */
	for (i = 0; i < num_syncrates; i++) {

		if (period_factor == scsi_syncrates[i].period_factor) {
			/* Period in kHz */
			return (100000000 / scsi_syncrates[i].period);
		}
	}

	/*
	 * Wasn't in the table, so use the standard
	 * 4 times conversion.
	 */
	return (10000000 / (period_factor * 4 * 10));
}

/*
 * Return the SCSI sync parameter that corresponds to
 * the passed-in period in 10ths of ns.
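 *
 * For example, 250 (25 ns, FAST-40) scales to 2500 hundredths and
 * matches exception entry 0x0a above, while 1000 (100 ns, FAST-10)
 * misses the table and falls through to the standard conversion:
 * 10000 / 400 == 25 (0x19).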
*/ u_int scsi_calc_syncparam(u_int period) { int i; int num_syncrates; if (period == 0) return (~0); /* Async */ /* Adjust for exception table being in 100ths. */ period *= 10; num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]); /* See if the period is in the "exception" table */ for (i = 0; i < num_syncrates; i++) { if (period <= scsi_syncrates[i].period) { /* Period in 100ths of ns */ return (scsi_syncrates[i].period_factor); } } /* * Wasn't in the table, so use the standard * 1/4 period in ns conversion. */ return (period/400); } int scsi_devid_is_naa_ieee_reg(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; struct scsi_vpd_id_naa_basic *naa; descr = (struct scsi_vpd_id_descriptor *)bufp; naa = (struct scsi_vpd_id_naa_basic *)descr->identifier; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; if (descr->length < sizeof(struct scsi_vpd_id_naa_ieee_reg)) return 0; if ((naa->naa >> SVPD_ID_NAA_NAA_SHIFT) != SVPD_ID_NAA_IEEE_REG) return 0; return 1; } int scsi_devid_is_sas_target(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if (!scsi_devid_is_naa_ieee_reg(bufp)) return 0; if ((descr->id_type & SVPD_ID_PIV) == 0) /* proto field reserved */ return 0; if ((descr->proto_codeset >> SVPD_ID_PROTO_SHIFT) != SCSI_PROTO_SAS) return 0; return 1; } int scsi_devid_is_lun_eui64(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_EUI64) return 0; return 1; } int scsi_devid_is_lun_naa(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; return 1; } int scsi_devid_is_lun_t10(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_T10) return 0; return 1; } int scsi_devid_is_lun_name(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_SCSI_NAME) return 0; return 1; } struct scsi_vpd_id_descriptor * scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len, scsi_devid_checkfn_t ck_fn) { uint8_t *desc_buf_end; desc_buf_end = (uint8_t *)desc + len; for (; desc->identifier <= desc_buf_end && desc->identifier + desc->length <= desc_buf_end; desc = (struct scsi_vpd_id_descriptor *)(desc->identifier + desc->length)) { if (ck_fn == NULL || ck_fn((uint8_t *)desc) != 0) return (desc); } return (NULL); } struct scsi_vpd_id_descriptor * scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len, scsi_devid_checkfn_t ck_fn) { uint32_t len; if (page_len < sizeof(*id)) return (NULL); len = MIN(scsi_2btoul(id->length), page_len - sizeof(*id)); return (scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) id->desc_list, len, ck_fn)); } int scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr, uint32_t valid_len) { switch (hdr->format_protocol & SCSI_TRN_PROTO_MASK) { case SCSI_PROTO_FC: { struct scsi_transportid_fcp *fcp; uint64_t n_port_name; fcp = (struct 
scsi_transportid_fcp *)hdr; n_port_name = scsi_8btou64(fcp->n_port_name); sbuf_printf(sb, "FCP address: 0x%.16jx",(uintmax_t)n_port_name); break; } case SCSI_PROTO_SPI: { struct scsi_transportid_spi *spi; spi = (struct scsi_transportid_spi *)hdr; sbuf_printf(sb, "SPI address: %u,%u", scsi_2btoul(spi->scsi_addr), scsi_2btoul(spi->rel_trgt_port_id)); break; } case SCSI_PROTO_SSA: /* * XXX KDM there is no transport ID defined in SPC-4 for * SSA. */ break; case SCSI_PROTO_1394: { struct scsi_transportid_1394 *sbp; uint64_t eui64; sbp = (struct scsi_transportid_1394 *)hdr; eui64 = scsi_8btou64(sbp->eui64); sbuf_printf(sb, "SBP address: 0x%.16jx", (uintmax_t)eui64); break; } case SCSI_PROTO_RDMA: { struct scsi_transportid_rdma *rdma; unsigned int i; rdma = (struct scsi_transportid_rdma *)hdr; sbuf_printf(sb, "RDMA address: 0x"); for (i = 0; i < sizeof(rdma->initiator_port_id); i++) sbuf_printf(sb, "%02x", rdma->initiator_port_id[i]); break; } case SCSI_PROTO_ISCSI: { uint32_t add_len, i; uint8_t *iscsi_name = NULL; int nul_found = 0; sbuf_printf(sb, "iSCSI address: "); if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == SCSI_TRN_ISCSI_FORMAT_DEVICE) { struct scsi_transportid_iscsi_device *dev; dev = (struct scsi_transportid_iscsi_device *)hdr; /* * Verify how much additional data we really have. */ add_len = scsi_2btoul(dev->additional_length); add_len = MIN(add_len, valid_len - __offsetof(struct scsi_transportid_iscsi_device, iscsi_name)); iscsi_name = &dev->iscsi_name[0]; } else if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == SCSI_TRN_ISCSI_FORMAT_PORT) { struct scsi_transportid_iscsi_port *port; port = (struct scsi_transportid_iscsi_port *)hdr; add_len = scsi_2btoul(port->additional_length); add_len = MIN(add_len, valid_len - __offsetof(struct scsi_transportid_iscsi_port, iscsi_name)); iscsi_name = &port->iscsi_name[0]; } else { sbuf_printf(sb, "unknown format %x", (hdr->format_protocol & SCSI_TRN_FORMAT_MASK) >> SCSI_TRN_FORMAT_SHIFT); break; } if (add_len == 0) { sbuf_printf(sb, "not enough data"); break; } /* * This is supposed to be a NUL-terminated ASCII * string, but you never know. So we're going to * check. We need to do this because there is no * sbuf equivalent of strncat(). */ for (i = 0; i < add_len; i++) { if (iscsi_name[i] == '\0') { nul_found = 1; break; } } /* * If there is a NUL in the name, we can just use * sbuf_cat(). Otherwise we need to use sbuf_bcat(). */ if (nul_found != 0) sbuf_cat(sb, iscsi_name); else sbuf_bcat(sb, iscsi_name, add_len); break; } case SCSI_PROTO_SAS: { struct scsi_transportid_sas *sas; uint64_t sas_addr; sas = (struct scsi_transportid_sas *)hdr; sas_addr = scsi_8btou64(sas->sas_address); sbuf_printf(sb, "SAS address: 0x%.16jx", (uintmax_t)sas_addr); break; } case SCSI_PROTO_ADITP: case SCSI_PROTO_ATA: case SCSI_PROTO_UAS: /* * No Transport ID format for ADI, ATA or USB is defined in * SPC-4. */ sbuf_printf(sb, "No known Transport ID format for protocol " "%#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK); break; case SCSI_PROTO_SOP: { struct scsi_transportid_sop *sop; struct scsi_sop_routing_id_norm *rid; sop = (struct scsi_transportid_sop *)hdr; rid = (struct scsi_sop_routing_id_norm *)sop->routing_id; /* * Note that there is no alternate format specified in SPC-4 * for the PCIe routing ID, so we don't really have a way * to know whether the second byte of the routing ID is * a device and function or just a function. So we just * assume bus,device,function. 
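 *
 * For example, assuming SCSI_TRN_SOP_DEV_SHIFT is 3 and
 * SCSI_TRN_SOP_FUNC_NORM_MAX is 0x7 (the usual PCIe split of five
 * device bits and three function bits), a routing ID of bus 0x02
 * and devfunc 0x43 prints below as "SOP Routing ID: 2,8,3", since
 * 0x43 >> 3 == 8 and 0x43 & 0x7 == 3.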
*/ sbuf_printf(sb, "SOP Routing ID: %u,%u,%u", rid->bus, rid->devfunc >> SCSI_TRN_SOP_DEV_SHIFT, rid->devfunc & SCSI_TRN_SOP_FUNC_NORM_MAX); break; } case SCSI_PROTO_NONE: default: sbuf_printf(sb, "Unknown protocol %#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK); break; } return (0); } struct scsi_nv scsi_proto_map[] = { { "fcp", SCSI_PROTO_FC }, { "spi", SCSI_PROTO_SPI }, { "ssa", SCSI_PROTO_SSA }, { "sbp", SCSI_PROTO_1394 }, { "1394", SCSI_PROTO_1394 }, { "srp", SCSI_PROTO_RDMA }, { "rdma", SCSI_PROTO_RDMA }, { "iscsi", SCSI_PROTO_ISCSI }, { "iqn", SCSI_PROTO_ISCSI }, { "sas", SCSI_PROTO_SAS }, { "aditp", SCSI_PROTO_ADITP }, { "ata", SCSI_PROTO_ATA }, { "uas", SCSI_PROTO_UAS }, { "usb", SCSI_PROTO_UAS }, { "sop", SCSI_PROTO_SOP } }; const char * scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value) { int i; for (i = 0; i < num_table_entries; i++) { if (table[i].value == value) return (table[i].name); } return (NULL); } /* * Given a name/value table, find a value matching the given name. * Return values: * SCSI_NV_FOUND - match found * SCSI_NV_AMBIGUOUS - more than one match, none of them exact * SCSI_NV_NOT_FOUND - no match found */ scsi_nv_status scsi_get_nv(struct scsi_nv *table, int num_table_entries, char *name, int *table_entry, scsi_nv_flags flags) { int i, num_matches = 0; for (i = 0; i < num_table_entries; i++) { size_t table_len, name_len; table_len = strlen(table[i].name); name_len = strlen(name); if ((((flags & SCSI_NV_FLAG_IG_CASE) != 0) && (strncasecmp(table[i].name, name, name_len) == 0)) || (((flags & SCSI_NV_FLAG_IG_CASE) == 0) && (strncmp(table[i].name, name, name_len) == 0))) { *table_entry = i; /* * Check for an exact match. If we have the same * number of characters in the table as the argument, * and we already know they're the same, we have * an exact match. */ if (table_len == name_len) return (SCSI_NV_FOUND); /* * Otherwise, bump up the number of matches. We'll * see later how many we have. */ num_matches++; } } if (num_matches > 1) return (SCSI_NV_AMBIGUOUS); else if (num_matches == 1) return (SCSI_NV_FOUND); else return (SCSI_NV_NOT_FOUND); } /* * Parse transport IDs for Fibre Channel, 1394 and SAS. Since these are * all 64-bit numbers, the code is similar. 
*/ int scsi_parse_transportid_64bit(int proto_id, char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { uint64_t value; char *endptr; int retval; size_t alloc_size; retval = 0; value = strtouq(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing ID %s, 64-bit number required", __func__, id_str); } retval = 1; goto bailout; } switch (proto_id) { case SCSI_PROTO_FC: alloc_size = sizeof(struct scsi_transportid_fcp); break; case SCSI_PROTO_1394: alloc_size = sizeof(struct scsi_transportid_1394); break; case SCSI_PROTO_SAS: alloc_size = sizeof(struct scsi_transportid_sas); break; default: if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unsupported " "protocol %d", __func__, proto_id); } retval = 1; goto bailout; break; /* NOTREACHED */ } #ifdef _KERNEL *hdr = malloc(alloc_size, type, flags); #else /* _KERNEL */ *hdr = malloc(alloc_size); #endif /*_KERNEL */ if (*hdr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, alloc_size); } retval = 1; goto bailout; } *alloc_len = alloc_size; bzero(*hdr, alloc_size); switch (proto_id) { case SCSI_PROTO_FC: { struct scsi_transportid_fcp *fcp; fcp = (struct scsi_transportid_fcp *)(*hdr); fcp->format_protocol = SCSI_PROTO_FC | SCSI_TRN_FCP_FORMAT_DEFAULT; scsi_u64to8b(value, fcp->n_port_name); break; } case SCSI_PROTO_1394: { struct scsi_transportid_1394 *sbp; sbp = (struct scsi_transportid_1394 *)(*hdr); sbp->format_protocol = SCSI_PROTO_1394 | SCSI_TRN_1394_FORMAT_DEFAULT; scsi_u64to8b(value, sbp->eui64); break; } case SCSI_PROTO_SAS: { struct scsi_transportid_sas *sas; sas = (struct scsi_transportid_sas *)(*hdr); sas->format_protocol = SCSI_PROTO_SAS | SCSI_TRN_SAS_FORMAT_DEFAULT; scsi_u64to8b(value, sas->sas_address); break; } default: break; } bailout: return (retval); } /* * Parse a SPI (Parallel SCSI) address of the form: id,rel_tgt_port */ int scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { unsigned long scsi_addr, target_port; struct scsi_transportid_spi *spi; char *tmpstr, *endptr; int retval; retval = 0; tmpstr = strsep(&id_str, ","); if (tmpstr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no ID found", __func__); } retval = 1; goto bailout; } scsi_addr = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing SCSI ID %s, number required", __func__, tmpstr); } retval = 1; goto bailout; } if (id_str == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no relative " "target port found", __func__); } retval = 1; goto bailout; } target_port = strtoul(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing relative target port %s, number " "required", __func__, id_str); } retval = 1; goto bailout; } #ifdef _KERNEL spi = malloc(sizeof(*spi), type, flags); #else spi = malloc(sizeof(*spi)); #endif if (spi == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*spi)); } retval = 1; goto bailout; } *alloc_len = sizeof(*spi); bzero(spi, sizeof(*spi));
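/*
 * Fill out the SPI transport ID: the format/protocol byte, then the
 * SCSI address and the relative target port identifier, both stored
 * big-endian by scsi_ulto2b().  For example, parsing "3,1" stores
 * 0x0003 in scsi_addr and 0x0001 in rel_trgt_port_id.
 */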
spi->format_protocol = SCSI_PROTO_SPI | SCSI_TRN_SPI_FORMAT_DEFAULT; scsi_ulto2b(scsi_addr, spi->scsi_addr); scsi_ulto2b(target_port, spi->rel_trgt_port_id); *hdr = (struct scsi_transportid_header *)spi; bailout: return (retval); } /* * Parse an RDMA/SRP Initiator Port ID string. This is 32 hexadecimal digits, * optionally prefixed by "0x" or "0X". */ int scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { struct scsi_transportid_rdma *rdma; int retval; size_t id_len, rdma_id_size; uint8_t rdma_id[SCSI_TRN_RDMA_PORT_LEN]; char *tmpstr; unsigned int i, j; retval = 0; id_len = strlen(id_str); rdma_id_size = SCSI_TRN_RDMA_PORT_LEN; /* * Check the size. It needs to be either 32 or 34 characters long. */ if ((id_len != (rdma_id_size * 2)) && (id_len != ((rdma_id_size * 2) + 2))) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: RDMA ID " "must be 32 hex digits (0x prefix " "optional), only %zu seen", __func__, id_len); } retval = 1; goto bailout; } tmpstr = id_str; /* * If the user gave us 34 characters, the string needs to start * with '0x'. */ if (id_len == ((rdma_id_size * 2) + 2)) { if ((tmpstr[0] == '0') && ((tmpstr[1] == 'x') || (tmpstr[1] == 'X'))) { tmpstr += 2; } else { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: RDMA " "ID prefix, if used, must be \"0x\", " "got %s", __func__, tmpstr); } retval = 1; goto bailout; } } bzero(rdma_id, sizeof(rdma_id)); /* * Convert ASCII hex into binary bytes. There is no standard * 128-bit integer type, and so no strtou128t() routine to convert * from hex into a large integer. In the end, we're not going to * an integer, but rather to a byte array, so that and the fact * that we require the user to give us 32 hex digits simplifies the * logic. */ for (i = 0; i < (rdma_id_size * 2); i++) { int cur_shift; unsigned char c; /* Increment the byte array one for every 2 hex digits */ j = i >> 1; /* * The first digit in every pair is the most significant * 4 bits. The second is the least significant 4 bits. */ if ((i % 2) == 0) cur_shift = 4; else cur_shift = 0; c = tmpstr[i]; /* Convert the ASCII hex character into a number */ if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 'A' - 10 : 'a' - 10; else { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "RDMA ID must be hex digits, got " "invalid character %c", __func__, tmpstr[i]); } retval = 1; goto bailout; } /* * The converted number can't be less than 0; the type is * unsigned, and the subtraction logic will not give us * a negative number. So we only need to make sure that * the value is not greater than 0xf. (i.e. make sure the * user didn't give us a value like "0x12jklmno"). 
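 *
 * As a worked example of this conversion: for the input pair "1f",
 * '1' converts to 1 and lands in the high nibble, 'f' converts to
 * 15, and the byte stored is (1 << 4) | 0xf == 0x1f.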
*/ if (c > 0xf) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "RDMA ID must be hex digits, got " "invalid character %c", __func__, tmpstr[i]); } retval = 1; goto bailout; } rdma_id[j] |= c << cur_shift; } #ifdef _KERNEL rdma = malloc(sizeof(*rdma), type, flags); #else rdma = malloc(sizeof(*rdma)); #endif if (rdma == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*rdma)); } retval = 1; goto bailout; } *alloc_len = sizeof(*rdma); bzero(rdma, *alloc_len); rdma->format_protocol = SCSI_PROTO_RDMA | SCSI_TRN_RDMA_FORMAT_DEFAULT; bcopy(rdma_id, rdma->initiator_port_id, SCSI_TRN_RDMA_PORT_LEN); *hdr = (struct scsi_transportid_header *)rdma; bailout: return (retval); } /* * Parse an iSCSI name. The format is either just the name: * * iqn.2012-06.com.example:target0 * or the name, separator and initiator session ID: * * iqn.2012-06.com.example:target0,i,0x123 * * The separator format is exact. */ int scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { size_t id_len, sep_len, id_size, name_len; int retval; unsigned int i, sep_pos, sep_found; const char *sep_template = ",i,0x"; const char *iqn_prefix = "iqn."; struct scsi_transportid_iscsi_device *iscsi; retval = 0; sep_found = 0; id_len = strlen(id_str); sep_len = strlen(sep_template); /* * The separator is defined as exactly ',i,0x'. Any other commas, * or any other form, is an error. So look for a comma, and once * we find that, the next few characters must match the separator * exactly. Once we get through the separator, there should be at * least one character. */ for (i = 0, sep_pos = 0; i < id_len; i++) { if (sep_pos == 0) { if (id_str[i] == sep_template[sep_pos]) sep_pos++; continue; } if (sep_pos < sep_len) { if (id_str[i] == sep_template[sep_pos]) { sep_pos++; continue; } if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "invalid separator in iSCSI name " "\"%s\"", __func__, id_str); } retval = 1; goto bailout; } else { sep_found = 1; break; } } /* * Check to see whether we have a separator but no digits after it. */ if ((sep_pos != 0) && (sep_found == 0)) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no digits " "found after separator in iSCSI name \"%s\"", __func__, id_str); } retval = 1; goto bailout; } /* * The incoming ID string has the "iqn." prefix stripped off. We * need enough space for the base structure (the structures are the * same for the two iSCSI forms), the prefix, the ID string and a * terminating NUL. */ id_size = sizeof(*iscsi) + strlen(iqn_prefix) + id_len + 1; #ifdef _KERNEL iscsi = malloc(id_size, type, flags); #else iscsi = malloc(id_size); #endif if (iscsi == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, id_size); } retval = 1; goto bailout; } *alloc_len = id_size; bzero(iscsi, id_size); iscsi->format_protocol = SCSI_PROTO_ISCSI; if (sep_found == 0) iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_DEVICE; else iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_PORT; name_len = id_size - sizeof(*iscsi); scsi_ulto2b(name_len, iscsi->additional_length); snprintf(iscsi->iscsi_name, name_len, "%s%s", iqn_prefix, id_str); *hdr = (struct scsi_transportid_header *)iscsi; bailout: return (retval); } /* * Parse a SCSI over PCIe (SOP) identifier. 
The Routing ID can either be * of the form 'bus,device,function' or 'bus,function'. */ int scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { struct scsi_transportid_sop *sop; unsigned long bus, device, function; char *tmpstr, *endptr; int retval, device_spec; retval = 0; device_spec = 0; device = 0; tmpstr = strsep(&id_str, ","); if ((tmpstr == NULL) || (*tmpstr == '\0')) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no ID found", __func__); } retval = 1; goto bailout; } bus = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing PCIe bus %s, number required", __func__, tmpstr); } retval = 1; goto bailout; } if ((id_str == NULL) || (*id_str == '\0')) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no PCIe " "device or function found", __func__); } retval = 1; goto bailout; } tmpstr = strsep(&id_str, ","); function = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing PCIe device/function %s, number " "required", __func__, tmpstr); } retval = 1; goto bailout; } /* * Check to see whether the user specified a third value. If so, * the second is the device. */ if (id_str != NULL) { if (*id_str == '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "no PCIe function found", __func__); } retval = 1; goto bailout; } device = function; device_spec = 1; function = strtoul(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "error parsing PCIe function %s, " "number required", __func__, id_str); } retval = 1; goto bailout; } } if (bus > SCSI_TRN_SOP_BUS_MAX) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: bus value " "%lu greater than maximum %u", __func__, bus, SCSI_TRN_SOP_BUS_MAX); } retval = 1; goto bailout; } if ((device_spec != 0) && (device > SCSI_TRN_SOP_DEV_MASK)) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: device value " "%lu greater than maximum %u", __func__, device, SCSI_TRN_SOP_DEV_MAX); } retval = 1; goto bailout; } if (((device_spec != 0) && (function > SCSI_TRN_SOP_FUNC_NORM_MAX)) || ((device_spec == 0) && (function > SCSI_TRN_SOP_FUNC_ALT_MAX))) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: function value " "%lu greater than maximum %u", __func__, function, (device_spec == 0) ? 
SCSI_TRN_SOP_FUNC_ALT_MAX : SCSI_TRN_SOP_FUNC_NORM_MAX); } retval = 1; goto bailout; } #ifdef _KERNEL sop = malloc(sizeof(*sop), type, flags); #else sop = malloc(sizeof(*sop)); #endif if (sop == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*sop)); } retval = 1; goto bailout; } *alloc_len = sizeof(*sop); bzero(sop, sizeof(*sop)); sop->format_protocol = SCSI_PROTO_SOP | SCSI_TRN_SOP_FORMAT_DEFAULT; if (device_spec != 0) { struct scsi_sop_routing_id_norm rid; rid.bus = bus; rid.devfunc = (device << SCSI_TRN_SOP_DEV_SHIFT) | function; bcopy(&rid, sop->routing_id, MIN(sizeof(rid), sizeof(sop->routing_id))); } else { struct scsi_sop_routing_id_alt rid; rid.bus = bus; rid.function = function; bcopy(&rid, sop->routing_id, MIN(sizeof(rid), sizeof(sop->routing_id))); } *hdr = (struct scsi_transportid_header *)sop; bailout: return (retval); } /* * transportid_str: NUL-terminated string with format: protocol,id * The ID is protocol specific. * hdr: Storage will be allocated for the transport ID. * alloc_len: The amount of memory allocated is returned here. * type: Malloc bucket (kernel only). * flags: Malloc flags (kernel only). * error_str: If non-NULL, it will contain error information (without * a terminating newline) if an error is returned. * error_str_len: Allocated length of the error string. * * Returns 0 for success, non-zero for failure. */ int scsi_parse_transportid(char *transportid_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { char *tmpstr; scsi_nv_status status; int retval, num_proto_entries, table_entry; retval = 0; table_entry = 0; /* * We do allow a period as well as a comma to separate the protocol * from the ID string. This is to accommodate iSCSI names, which * start with "iqn.". */ tmpstr = strsep(&transportid_str, ",."); if (tmpstr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: transportid_str is NULL", __func__); } retval = 1; goto bailout; } num_proto_entries = sizeof(scsi_proto_map) / sizeof(scsi_proto_map[0]); status = scsi_get_nv(scsi_proto_map, num_proto_entries, tmpstr, &table_entry, SCSI_NV_FLAG_IG_CASE); if (status != SCSI_NV_FOUND) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: %s protocol " "name %s", __func__, (status == SCSI_NV_AMBIGUOUS) ?
"ambiguous" : "invalid", tmpstr); } retval = 1; goto bailout; } switch (scsi_proto_map[table_entry].value) { case SCSI_PROTO_FC: case SCSI_PROTO_1394: case SCSI_PROTO_SAS: retval = scsi_parse_transportid_64bit( scsi_proto_map[table_entry].value, transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SPI: retval = scsi_parse_transportid_spi(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_RDMA: retval = scsi_parse_transportid_rdma(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_ISCSI: retval = scsi_parse_transportid_iscsi(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SOP: retval = scsi_parse_transportid_sop(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SSA: case SCSI_PROTO_ADITP: case SCSI_PROTO_ATA: case SCSI_PROTO_UAS: case SCSI_PROTO_NONE: default: /* * There is no format defined for a Transport ID for these * protocols. So even if the user gives us something, we * have no way to turn it into a standard SCSI Transport ID. */ retval = 1; if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no Transport " "ID format exists for protocol %s", __func__, tmpstr); } goto bailout; break; /* NOTREACHED */ } bailout: return (retval); } struct scsi_attrib_table_entry scsi_mam_attr_table[] = { { SMA_ATTR_REM_CAP_PARTITION, SCSI_ATTR_FLAG_NONE, "Remaining Capacity in Partition", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,/*parse_str*/ NULL }, { SMA_ATTR_MAX_CAP_PARTITION, SCSI_ATTR_FLAG_NONE, "Maximum Capacity in Partition", /*suffix*/"MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TAPEALERT_FLAGS, SCSI_ATTR_FLAG_HEX, "TapeAlert Flags", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LOAD_COUNT, SCSI_ATTR_FLAG_NONE, "Load Count", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MAM_SPACE_REMAINING, SCSI_ATTR_FLAG_NONE, "MAM Space Remaining", /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE, "Assigning Organization", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_FORMAT_DENSITY_CODE, SCSI_ATTR_FLAG_HEX, "Format Density Code", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_INITIALIZATION_COUNT, SCSI_ATTR_FLAG_NONE, "Initialization Count", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOLUME_ID, SCSI_ATTR_FLAG_NONE, "Volume Identifier", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOLUME_CHANGE_REF, SCSI_ATTR_FLAG_HEX, "Volume Change Reference", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD_1, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 1", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD_2, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 2", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { 
SMA_ATTR_DEV_SERIAL_LAST_LOAD_3, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 3", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_WRITTEN_LT, SCSI_ATTR_FLAG_NONE, "Total MB Written in Medium Life", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_READ_LT, SCSI_ATTR_FLAG_NONE, "Total MB Read in Medium Life", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_WRITTEN_CUR, SCSI_ATTR_FLAG_NONE, "Total MB Written in Current/Last Load", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_READ_CUR, SCSI_ATTR_FLAG_NONE, "Total MB Read in Current/Last Load", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_FIRST_ENC_BLOCK, SCSI_ATTR_FLAG_NONE, "Logical Position of First Encrypted Block", /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_NEXT_UNENC_BLOCK, SCSI_ATTR_FLAG_NONE, "Logical Position of First Unencrypted Block after First " "Encrypted Block", /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MEDIUM_USAGE_HIST, SCSI_ATTR_FLAG_NONE, "Medium Usage History", /*suffix*/ NULL, /*to_str*/ NULL, /*parse_str*/ NULL }, { SMA_ATTR_PART_USAGE_HIST, SCSI_ATTR_FLAG_NONE, "Partition Usage History", /*suffix*/ NULL, /*to_str*/ NULL, /*parse_str*/ NULL }, { SMA_ATTR_MED_MANUF, SCSI_ATTR_FLAG_NONE, "Medium Manufacturer", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_SERIAL, SCSI_ATTR_FLAG_NONE, "Medium Serial Number", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_LENGTH, SCSI_ATTR_FLAG_NONE, "Medium Length", /*suffix*/"m", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_WIDTH, SCSI_ATTR_FLAG_FP | SCSI_ATTR_FLAG_DIV_10 | SCSI_ATTR_FLAG_FP_1DIGIT, "Medium Width", /*suffix*/"mm", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE, "Assigning Organization", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_DENSITY_CODE, SCSI_ATTR_FLAG_HEX, "Medium Density Code", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_MANUF_DATE, SCSI_ATTR_FLAG_NONE, "Medium Manufacture Date", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MAM_CAPACITY, SCSI_ATTR_FLAG_NONE, "MAM Capacity", /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_TYPE, SCSI_ATTR_FLAG_HEX, "Medium Type", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_TYPE_INFO, SCSI_ATTR_FLAG_HEX, "Medium Type Information", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_SERIAL_NUM, SCSI_ATTR_FLAG_NONE, "Medium Serial Number", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_VENDOR, SCSI_ATTR_FLAG_NONE, "Application Vendor", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_NAME, SCSI_ATTR_FLAG_NONE, "Application Name", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_VERSION, SCSI_ATTR_FLAG_NONE, "Application Version", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_USER_MED_TEXT_LABEL, SCSI_ATTR_FLAG_NONE, "User Medium Text Label", /*suffix*/NULL, /*to_str*/ 
scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LAST_WRITTEN_TIME, SCSI_ATTR_FLAG_NONE, "Date and Time Last Written", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TEXT_LOCAL_ID, SCSI_ATTR_FLAG_HEX, "Text Localization Identifier", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_BARCODE, SCSI_ATTR_FLAG_NONE, "Barcode", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_HOST_OWNER_NAME, SCSI_ATTR_FLAG_NONE, "Owning Host Textual Name", /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MEDIA_POOL, SCSI_ATTR_FLAG_NONE, "Media Pool", /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_PART_USER_LABEL, SCSI_ATTR_FLAG_NONE, "Partition User Text Label", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LOAD_UNLOAD_AT_PART, SCSI_ATTR_FLAG_NONE, "Load/Unload at Partition", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_FORMAT_VERSION, SCSI_ATTR_FLAG_NONE, "Application Format Version", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOL_COHERENCY_INFO, SCSI_ATTR_FLAG_NONE, "Volume Coherency Information", /*suffix*/NULL, /*to_str*/ scsi_attrib_volcoh_sbuf, /*parse_str*/ NULL }, { 0x0ff1, SCSI_ATTR_FLAG_NONE, "Spectra MLM Creation", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff2, SCSI_ATTR_FLAG_NONE, "Spectra MLM C3", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff3, SCSI_ATTR_FLAG_NONE, "Spectra MLM RW", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff4, SCSI_ATTR_FLAG_NONE, "Spectra MLM SDC List", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff7, SCSI_ATTR_FLAG_NONE, "Spectra MLM Post Scan", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ffe, SCSI_ATTR_FLAG_NONE, "Spectra MLM Checksum", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f1, SCSI_ATTR_FLAG_NONE, "Spectra MLM Creation", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f2, SCSI_ATTR_FLAG_NONE, "Spectra MLM C3", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f3, SCSI_ATTR_FLAG_NONE, "Spectra MLM RW", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f4, SCSI_ATTR_FLAG_NONE, "Spectra MLM SDC List", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f7, SCSI_ATTR_FLAG_NONE, "Spectra MLM Post Scan", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17ff, SCSI_ATTR_FLAG_NONE, "Spectra MLM Checksum", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, }; /* * Print out Volume Coherency Information (Attribute 0x080c). * This field has two variable length members, including one at the * beginning, so it isn't practical to have a fixed structure definition. * This is current as of SSC4r03 (see section 4.2.21.3), dated March 25, * 2013. 
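 *
 * The layout parsed below is, in order (sizes in bytes):
 *
 *	1	Volume Change Reference Value length (n)
 *	n	Volume Change Reference Value
 *	8	Volume Coherency Count
 *	8	Volume Coherency Set Identifier
 *	2	Application Client Specific Information length (m)
 *	m	Application Client Specific Information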
*/ int scsi_attrib_volcoh_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size; uint64_t tmp_val; uint8_t *cur_ptr; int retval; int vcr_len, as_len; retval = 0; tmp_val = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (field_size > avail_len) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; goto bailout; } else if (field_size == 0) { /* * It isn't clear from the spec whether a field length of * 0 is invalid here. It probably is, but be lenient here * to avoid inconveniencing the user. */ goto bailout; } cur_ptr = hdr->attribute; vcr_len = *cur_ptr; cur_ptr++; sbuf_printf(sb, "\n\tVolume Change Reference Value:"); switch (vcr_len) { case 0: if (error_str != NULL) { snprintf(error_str, error_str_len, "Volume Change " "Reference value has length of 0"); } retval = 1; goto bailout; break; /*NOTREACHED*/ case 1: tmp_val = *cur_ptr; break; case 2: tmp_val = scsi_2btoul(cur_ptr); break; case 3: tmp_val = scsi_3btoul(cur_ptr); break; case 4: tmp_val = scsi_4btoul(cur_ptr); break; case 8: tmp_val = scsi_8btou64(cur_ptr); break; default: sbuf_printf(sb, "\n"); sbuf_hexdump(sb, cur_ptr, vcr_len, NULL, 0); break; } if (vcr_len <= 8) sbuf_printf(sb, " 0x%jx\n", (uintmax_t)tmp_val); cur_ptr += vcr_len; tmp_val = scsi_8btou64(cur_ptr); sbuf_printf(sb, "\tVolume Coherency Count: %ju\n", (uintmax_t)tmp_val); cur_ptr += sizeof(tmp_val); tmp_val = scsi_8btou64(cur_ptr); sbuf_printf(sb, "\tVolume Coherency Set Identifier: 0x%jx\n", (uintmax_t)tmp_val); /* * Figure out how long the Application Client Specific Information * is and produce a hexdump. */ cur_ptr += sizeof(tmp_val); as_len = scsi_2btoul(cur_ptr); cur_ptr += sizeof(uint16_t); sbuf_printf(sb, "\tApplication Client Specific Information: "); if (((as_len == SCSI_LTFS_VER0_LEN) || (as_len == SCSI_LTFS_VER1_LEN)) && (strncmp(cur_ptr, SCSI_LTFS_STR_NAME, SCSI_LTFS_STR_LEN) == 0)) { sbuf_printf(sb, "LTFS\n"); cur_ptr += SCSI_LTFS_STR_LEN + 1; if (cur_ptr[SCSI_LTFS_UUID_LEN] != '\0') cur_ptr[SCSI_LTFS_UUID_LEN] = '\0'; sbuf_printf(sb, "\tLTFS UUID: %s\n", cur_ptr); cur_ptr += SCSI_LTFS_UUID_LEN + 1; /* XXX KDM check the length */ sbuf_printf(sb, "\tLTFS Version: %d\n", *cur_ptr); } else { sbuf_printf(sb, "Unknown\n"); sbuf_hexdump(sb, cur_ptr, as_len, NULL, 0); } bailout: return (retval); } int scsi_attrib_vendser_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size; struct scsi_attrib_vendser *vendser; cam_strvis_flags strvis_flags; int retval = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (field_size > avail_len) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; goto bailout; } else if (field_size == 0) { /* * A field size of 0 doesn't make sense here. The device * can at least give you the vendor ID, even if it can't * give you the serial number. 
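 *
 * A well-formed value is the T10 vendor identification field (8
 * bytes by definition) followed by the serial number; the code
 * below prints the two separated by a space, e.g. "ACME 0123456789"
 * (a made-up example).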
*/ if (error_str != NULL) { snprintf(error_str, error_str_len, "The length of " "attribute ID 0x%.4x is 0", scsi_2btoul(hdr->id)); } retval = 1; goto bailout; } vendser = (struct scsi_attrib_vendser *)hdr->attribute; switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) { case SCSI_ATTR_OUTPUT_NONASCII_TRIM: strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM; break; case SCSI_ATTR_OUTPUT_NONASCII_RAW: strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW; break; case SCSI_ATTR_OUTPUT_NONASCII_ESC: default: strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC; break; } cam_strvis_sbuf(sb, vendser->vendor, sizeof(vendser->vendor), strvis_flags); sbuf_putc(sb, ' '); cam_strvis_sbuf(sb, vendser->serial_num, sizeof(vendser->serial_num), strvis_flags); bailout: return (retval); } int scsi_attrib_hexdump_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { uint32_t field_size; ssize_t avail_len; uint32_t print_len; uint8_t *num_ptr; int retval = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); print_len = MIN(avail_len, field_size); num_ptr = hdr->attribute; if (print_len > 0) { sbuf_printf(sb, "\n"); sbuf_hexdump(sb, num_ptr, print_len, NULL, 0); } return (retval); } int scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { uint64_t print_number; size_t avail_len; uint32_t number_size; int retval = 0; number_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (avail_len < number_size) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, number_size); } retval = 1; goto bailout; } switch (number_size) { case 0: /* * We don't treat this as an error, since there may be * scenarios where a device reports a field but then gives * a length of 0. See the note in scsi_attrib_ascii_sbuf(). */ goto bailout; break; /*NOTREACHED*/ case 1: print_number = hdr->attribute[0]; break; case 2: print_number = scsi_2btoul(hdr->attribute); break; case 3: print_number = scsi_3btoul(hdr->attribute); break; case 4: print_number = scsi_4btoul(hdr->attribute); break; case 8: print_number = scsi_8btou64(hdr->attribute); break; default: /* * If we wind up here, the number is too big to print * normally, so just do a hexdump. */ retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len, flags, output_flags, error_str, error_str_len); goto bailout; break; } if (flags & SCSI_ATTR_FLAG_FP) { #ifndef _KERNEL long double num_float; num_float = (long double)print_number; if (flags & SCSI_ATTR_FLAG_DIV_10) num_float /= 10; sbuf_printf(sb, "%.*Lf", (flags & SCSI_ATTR_FLAG_FP_1DIGIT) ? 1 : 0, num_float); #else /* _KERNEL */ sbuf_printf(sb, "%ju", (flags & SCSI_ATTR_FLAG_DIV_10) ?
(print_number / 10) : print_number); #endif /* _KERNEL */ } else if (flags & SCSI_ATTR_FLAG_HEX) { sbuf_printf(sb, "0x%jx", (uintmax_t)print_number); } else sbuf_printf(sb, "%ju", (uintmax_t)print_number); bailout: return (retval); } int scsi_attrib_ascii_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size, print_size; int retval = 0; avail_len = valid_len - sizeof(*hdr); field_size = scsi_2btoul(hdr->length); print_size = MIN(avail_len, field_size); if (print_size > 0) { cam_strvis_flags strvis_flags; switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) { case SCSI_ATTR_OUTPUT_NONASCII_TRIM: strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM; break; case SCSI_ATTR_OUTPUT_NONASCII_RAW: strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW; break; case SCSI_ATTR_OUTPUT_NONASCII_ESC: default: strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC; break; } cam_strvis_sbuf(sb, hdr->attribute, print_size, strvis_flags); } else if (avail_len < field_size) { /* * We only report an error if the user didn't allocate * enough space to hold the full value of this field. If * the field length is 0, that is allowed by the spec. * e.g. in SPC-4r37, section 7.4.2.2.5, VOLUME IDENTIFIER * "This attribute indicates the current volume identifier * (see SMC-3) of the medium. If the device server supports * this attribute but does not have access to the volume * identifier, the device server shall report this attribute * with an attribute length value of zero." */ if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; } return (retval); } int scsi_attrib_text_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size, print_size; int retval = 0; int esc_text = 1; avail_len = valid_len - sizeof(*hdr); field_size = scsi_2btoul(hdr->length); print_size = MIN(avail_len, field_size); if ((output_flags & SCSI_ATTR_OUTPUT_TEXT_MASK) == SCSI_ATTR_OUTPUT_TEXT_RAW) esc_text = 0; if (print_size > 0) { uint32_t i; for (i = 0; i < print_size; i++) { if (hdr->attribute[i] == '\0') continue; else if (((unsigned char)hdr->attribute[i] < 0x80) || (esc_text == 0)) sbuf_putc(sb, hdr->attribute[i]); else sbuf_printf(sb, "%%%02x", (unsigned char)hdr->attribute[i]); } } else if (avail_len < field_size) { /* * We only report an error if the user didn't allocate * enough space to hold the full value of this field. 
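 *
 * (For reference, the loop above passes 7-bit bytes through
 * unchanged and, unless raw output was requested, escapes any byte
 * with the high bit set, so a stored byte of 0xe9 is emitted as
 * "%e9".)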
*/ if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; } return (retval); } struct scsi_attrib_table_entry * scsi_find_attrib_entry(struct scsi_attrib_table_entry *table, size_t num_table_entries, uint32_t id) { uint32_t i; for (i = 0; i < num_table_entries; i++) { if (table[i].id == id) return (&table[i]); } return (NULL); } struct scsi_attrib_table_entry * scsi_get_attrib_entry(uint32_t id) { return (scsi_find_attrib_entry(scsi_mam_attr_table, sizeof(scsi_mam_attr_table) / sizeof(scsi_mam_attr_table[0]), id)); } int scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len, struct scsi_mam_attribute_header *hdr, uint32_t output_flags, char *error_str, size_t error_str_len) { int retval; switch (hdr->byte2 & SMA_FORMAT_MASK) { case SMA_FORMAT_ASCII: retval = scsi_attrib_ascii_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str,error_str_len); break; case SMA_FORMAT_BINARY: if (scsi_2btoul(hdr->length) <= 8) retval = scsi_attrib_int_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); else retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); break; case SMA_FORMAT_TEXT: retval = scsi_attrib_text_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); break; default: if (error_str != NULL) { snprintf(error_str, error_str_len, "Unknown attribute " "format 0x%x", hdr->byte2 & SMA_FORMAT_MASK); } retval = 1; goto bailout; break; /*NOTREACHED*/ } sbuf_trim(sb); bailout: return (retval); } void scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, const char *desc) { int need_space = 0; uint32_t len; uint32_t id; /* * We can't do anything if we don't have enough valid data for the * header. */ if (valid_len < sizeof(*hdr)) return; id = scsi_2btoul(hdr->id); /* * Note that we print out the value of the attribute listed in the * header, regardless of whether we actually got that many bytes * back from the device through the controller. A truncated result * could be the result of a failure to ask for enough data; the * header indicates how many bytes are allocated for this attribute * in the MAM. */ len = scsi_2btoul(hdr->length); if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_MASK) == SCSI_ATTR_OUTPUT_FIELD_NONE) return; if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_DESC) && (desc != NULL)) { sbuf_printf(sb, "%s", desc); need_space = 1; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_NUM) { sbuf_printf(sb, "%s(0x%.4x)", (need_space) ? " " : "", id); need_space = 0; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_SIZE) { sbuf_printf(sb, "%s[%d]", (need_space) ? " " : "", len); need_space = 0; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_RW) { sbuf_printf(sb, "%s(%s)", (need_space) ? " " : "", (hdr->byte2 & SMA_READ_ONLY) ? 
"RO" : "RW"); } sbuf_printf(sb, ": "); } int scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, struct scsi_attrib_table_entry *user_table, size_t num_user_entries, int prefer_user_table, uint32_t output_flags, char *error_str, int error_str_len) { int retval; struct scsi_attrib_table_entry *table1 = NULL, *table2 = NULL; struct scsi_attrib_table_entry *entry = NULL; size_t table1_size = 0, table2_size = 0; uint32_t id; retval = 0; if (valid_len < sizeof(*hdr)) { retval = 1; goto bailout; } id = scsi_2btoul(hdr->id); if (user_table != NULL) { if (prefer_user_table != 0) { table1 = user_table; table1_size = num_user_entries; table2 = scsi_mam_attr_table; table2_size = sizeof(scsi_mam_attr_table) / sizeof(scsi_mam_attr_table[0]); } else { table1 = scsi_mam_attr_table; table1_size = sizeof(scsi_mam_attr_table) / sizeof(scsi_mam_attr_table[0]); table2 = user_table; table2_size = num_user_entries; } } else { table1 = scsi_mam_attr_table; table1_size = sizeof(scsi_mam_attr_table) / sizeof(scsi_mam_attr_table[0]); } entry = scsi_find_attrib_entry(table1, table1_size, id); if (entry != NULL) { scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, entry->desc); if (entry->to_str == NULL) goto print_default; retval = entry->to_str(sb, hdr, valid_len, entry->flags, output_flags, error_str, error_str_len); goto bailout; } if (table2 != NULL) { entry = scsi_find_attrib_entry(table2, table2_size, id); if (entry != NULL) { if (entry->to_str == NULL) goto print_default; scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, entry->desc); retval = entry->to_str(sb, hdr, valid_len, entry->flags, output_flags, error_str, error_str_len); goto bailout; } } scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, NULL); print_default: retval = scsi_attrib_value_sbuf(sb, valid_len, hdr, output_flags, error_str, error_str_len); bailout: if (retval == 0) { if ((entry != NULL) && (entry->suffix != NULL)) sbuf_printf(sb, " %s", entry->suffix); sbuf_trim(sb); sbuf_printf(sb, "\n"); } return (retval); } void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_test_unit_ready *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_test_unit_ready *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = TEST_UNIT_READY; } void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_request_sense *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_request_sense *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REQUEST_SENSE; scsi_cmd->length = dxfer_len; } void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len, int evpd, u_int8_t page_code, u_int8_t sense_len, u_int32_t timeout) { struct scsi_inquiry *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/inq_buf, /*dxfer_len*/inq_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct 
scsi_inquiry *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = INQUIRY; if (evpd) { scsi_cmd->byte2 |= SI_EVPD; scsi_cmd->page_code = page_code; } scsi_ulto2b(inq_len, scsi_cmd->length); } void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { scsi_mode_sense_len(csio, retries, cbfcnp, tag_action, dbd, page_code, page, param_buf, param_len, 0, sense_len, timeout); } void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; /* * Use the smallest possible command to perform the operation. */ if ((param_len < 256) && (minimum_cmd_size < 10)) { /* * We can fit in a 6 byte cdb. */ struct scsi_mode_sense_6 *scsi_cmd; scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SENSE_6; if (dbd != 0) scsi_cmd->byte2 |= SMS_DBD; scsi_cmd->page = page_code | page; scsi_cmd->length = param_len; cdb_len = sizeof(*scsi_cmd); } else { /* * Need a 10 byte cdb. */ struct scsi_mode_sense_10 *scsi_cmd; scsi_cmd = (struct scsi_mode_sense_10 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SENSE_10; if (dbd != 0) scsi_cmd->byte2 |= SMS_DBD; scsi_cmd->page = page_code | page; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, param_buf, param_len, sense_len, cdb_len, timeout); } void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { scsi_mode_select_len(csio, retries, cbfcnp, tag_action, scsi_page_fmt, save_pages, param_buf, param_len, 0, sense_len, timeout); } void scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; /* * Use the smallest possible command to perform the operation. */ if ((param_len < 256) && (minimum_cmd_size < 10)) { /* * We can fit in a 6 byte cdb. */ struct scsi_mode_select_6 *scsi_cmd; scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT_6; if (scsi_page_fmt != 0) scsi_cmd->byte2 |= SMS_PF; if (save_pages != 0) scsi_cmd->byte2 |= SMS_SP; scsi_cmd->length = param_len; cdb_len = sizeof(*scsi_cmd); } else { /* * Need a 10 byte cdb. 
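 * MODE SELECT(6) carries its parameter list length in a single
 * byte, so a parameter list of 256 bytes or more, or a caller that
 * passes minimum_cmd_size >= 10, lands here; the length is then
 * stored in two bytes with scsi_ulto2b() below.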
*/ struct scsi_mode_select_10 *scsi_cmd; scsi_cmd = (struct scsi_mode_select_10 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT_10; if (scsi_page_fmt != 0) scsi_cmd->byte2 |= SMS_PF; if (save_pages != 0) scsi_cmd->byte2 |= SMS_SP; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_OUT, tag_action, param_buf, param_len, sense_len, cdb_len, timeout); } void scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, u_int8_t page, int save_pages, int ppc, u_int32_t paramptr, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_log_sense *scsi_cmd; u_int8_t cdb_len; scsi_cmd = (struct scsi_log_sense *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOG_SENSE; scsi_cmd->page = page_code | page; if (save_pages != 0) scsi_cmd->byte2 |= SLS_SP; if (ppc != 0) scsi_cmd->byte2 |= SLS_PPC; scsi_ulto2b(paramptr, scsi_cmd->paramptr); scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/param_buf, /*dxfer_len*/param_len, sense_len, cdb_len, timeout); } void scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, int save_pages, int pc_reset, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_log_select *scsi_cmd; u_int8_t cdb_len; scsi_cmd = (struct scsi_log_select *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOG_SELECT; scsi_cmd->page = page_code & SLS_PAGE_CODE; if (save_pages != 0) scsi_cmd->byte2 |= SLS_SP; if (pc_reset != 0) scsi_cmd->byte2 |= SLS_PCR; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/param_buf, /*dxfer_len*/param_len, sense_len, cdb_len, timeout); } /* * Prevent or allow the user to remove the media */ void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_prevent *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_prevent *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = PREVENT_ALLOW; scsi_cmd->how = action; } /* XXX allow specification of address and PMI bit and LBA */ void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_capacity_data *rcap_buf, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_capacity *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rcap_buf, /*dxfer_len*/sizeof(*rcap_buf), sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_capacity *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_CAPACITY; } void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint64_t lba, int reladr, int pmi, uint8_t *rcap_buf, int 
rcap_buf_len, uint8_t sense_len, uint32_t timeout) { struct scsi_read_capacity_16 *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rcap_buf, /*dxfer_len*/rcap_buf_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SERVICE_ACTION_IN; scsi_cmd->service_action = SRC16_SERVICE_ACTION; scsi_u64to8b(lba, scsi_cmd->addr); scsi_ulto4b(rcap_buf_len, scsi_cmd->alloc_len); if (pmi) scsi_cmd->reladr |= SRC16_PMI; if (reladr) scsi_cmd->reladr |= SRC16_RELADR; } void scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t select_report, struct scsi_report_luns_data *rpl_buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_luns *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rpl_buf, /*dxfer_len*/alloc_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_report_luns *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_LUNS; scsi_cmd->select_report = select_report; scsi_ulto4b(alloc_len, scsi_cmd->length); } void scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t pdf, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_target_group *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)buf, /*dxfer_len*/alloc_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MAINTENANCE_IN; scsi_cmd->service_action = REPORT_TARGET_PORT_GROUPS | pdf; scsi_ulto4b(alloc_len, scsi_cmd->length); } void scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_target_group *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/(u_int8_t *)buf, /*dxfer_len*/alloc_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MAINTENANCE_OUT; scsi_cmd->service_action = SET_TARGET_PORT_GROUPS; scsi_ulto4b(alloc_len, scsi_cmd->length); } /* * Synchronize the media to the contents of the cache for * the given lba/count pair. Specifying 0/0 means sync * the whole cache.
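 *
 * As a usage sketch (a hypothetical caller, not part of this file),
 * flushing 16 blocks starting at LBA 1024 might look like:
 *
 *	scsi_synchronize_cache(&ccb->csio,
 *	    1,			// retries
 *	    NULL,		// cbfcnp
 *	    MSG_SIMPLE_Q_TAG,	// tag_action
 *	    1024,		// begin_lba
 *	    16,			// lb_count
 *	    SSD_FULL_SIZE,	// sense_len
 *	    5000);		// timeout, in ms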
*/ void scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t begin_lba, u_int16_t lb_count, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sync_cache *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_sync_cache *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SYNCHRONIZE_CACHE; scsi_ulto4b(begin_lba, scsi_cmd->begin_lba); scsi_ulto2b(lb_count, scsi_cmd->lb_count); } void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { int read; u_int8_t cdb_len; read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ; /* * Use the smallest possible command to perform the operation * as some legacy hardware does not support the 10 byte commands. * If any of the bits in byte2 is set, we have to go with a larger * command. */ if ((minimum_cmd_size < 10) && ((lba & 0x1fffff) == lba) && ((block_count & 0xff) == block_count) && (byte2 == 0)) { /* * We can fit in a 6 byte cdb. */ struct scsi_rw_6 *scsi_cmd; scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_6 : WRITE_6; scsi_ulto3b(lba, scsi_cmd->addr); scsi_cmd->length = block_count & 0xff; scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("6byte: %x%x%x:%d:%d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->length, dxfer_len)); } else if ((minimum_cmd_size < 12) && ((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* * Need a 10 byte cdb. */ struct scsi_rw_10 *scsi_cmd; scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_10 : WRITE_10; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto2b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], dxfer_len)); } else if ((minimum_cmd_size < 16) && ((block_count & 0xffffffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* * The block count is too big for a 10 byte CDB, use a 12 * byte CDB. */ struct scsi_rw_12 *scsi_cmd; scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_12 : WRITE_12; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("12byte: %x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], scsi_cmd->length[2], scsi_cmd->length[3], dxfer_len)); } else { /* * 16 byte CDB. We'll only get here if the LBA is larger * than 2^32, or if the user asks for a 16 byte command. */ struct scsi_rw_16 *scsi_cmd; scsi_cmd = (struct scsi_rw_16 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? 
void
scsi_write_same(struct ccb_scsiio *csio, u_int32_t retries,
		void (*cbfcnp)(struct cam_periph *, union ccb *),
		u_int8_t tag_action, u_int8_t byte2, int minimum_cmd_size,
		u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr,
		u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
{
	u_int8_t cdb_len;

	if ((minimum_cmd_size < 16)
	 && ((block_count & 0xffff) == block_count)
	 && ((lba & 0xffffffff) == lba)) {
		/*
		 * Need a 10 byte cdb.
		 */
		struct scsi_write_same_10 *scsi_cmd;

		scsi_cmd = (struct scsi_write_same_10 *)
		    &csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = WRITE_SAME_10;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->group = 0;
		scsi_ulto2b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0],
			   scsi_cmd->addr[1], scsi_cmd->addr[2],
			   scsi_cmd->addr[3], scsi_cmd->length[0],
			   scsi_cmd->length[1], dxfer_len));
	} else {
		/*
		 * 16 byte CDB.  We'll only get here if the LBA is larger
		 * than 2^32, or if the user asks for a 16 byte command.
		 */
		struct scsi_write_same_16 *scsi_cmd;

		scsi_cmd = (struct scsi_write_same_16 *)
		    &csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = WRITE_SAME_16;
		scsi_cmd->byte2 = byte2;
		scsi_u64to8b(lba, scsi_cmd->addr);
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->group = 0;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("16byte: %x%x%x%x%x%x%x%x:%x%x%x%x: %d\n",
			   scsi_cmd->addr[0], scsi_cmd->addr[1],
			   scsi_cmd->addr[2], scsi_cmd->addr[3],
			   scsi_cmd->addr[4], scsi_cmd->addr[5],
			   scsi_cmd->addr[6], scsi_cmd->addr[7],
			   scsi_cmd->length[0], scsi_cmd->length[1],
			   scsi_cmd->length[2], scsi_cmd->length[3],
			   dxfer_len));
	}
	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT,
	    tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout);
}

void
scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  u_int8_t tag_action, u_int8_t *data_ptr,
		  u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
{
	scsi_ata_pass_16(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
			 tag_action, /*protocol*/AP_PROTO_PIO_IN,
			 /*ata_flags*/AP_FLAG_TDIR_FROM_DEV |
			 AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT,
			 /*features*/0, /*sector_count*/dxfer_len, /*lba*/0,
			 /*command*/ATA_ATA_IDENTIFY, /*control*/0, data_ptr,
			 dxfer_len, sense_len, timeout);
}

void
scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int8_t tag_action, u_int16_t block_count,
	      u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len,
	      u_int32_t timeout)
{
	scsi_ata_pass_16(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT,
			 tag_action, /*protocol*/AP_EXTEND | AP_PROTO_DMA,
			 /*ata_flags*/AP_FLAG_TLEN_SECT_CNT |
			 AP_FLAG_BYT_BLOK_BLOCKS,
			 /*features*/ATA_DSM_TRIM,
			 /*sector_count*/block_count, /*lba*/0,
			 /*command*/ATA_DATA_SET_MANAGEMENT, /*control*/0,
			 data_ptr, dxfer_len, sense_len, timeout);
}

void
scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int32_t flags, u_int8_t tag_action, u_int8_t protocol,
		 u_int8_t ata_flags, u_int16_t features,
		 u_int16_t sector_count, uint64_t lba, u_int8_t command,
		 u_int8_t control, u_int8_t *data_ptr, u_int16_t dxfer_len,
		 u_int8_t sense_len, u_int32_t timeout)
{
	struct ata_pass_16 *ata_cmd;

	ata_cmd = (struct ata_pass_16 *)&csio->cdb_io.cdb_bytes;
	ata_cmd->opcode = ATA_PASS_16;
	ata_cmd->protocol = protocol;
	ata_cmd->flags = ata_flags;
	ata_cmd->features_ext = features >> 8;
	ata_cmd->features = features;
	ata_cmd->sector_count_ext = sector_count >> 8;
	ata_cmd->sector_count = sector_count;
	ata_cmd->lba_low = lba;
	ata_cmd->lba_mid = lba >> 8;
	ata_cmd->lba_high = lba >> 16;
	ata_cmd->device = ATA_DEV_LBA;
	if (protocol & AP_EXTEND) {
		ata_cmd->lba_low_ext = lba >> 24;
		ata_cmd->lba_mid_ext = lba >> 32;
		ata_cmd->lba_high_ext = lba >> 40;
	} else
		ata_cmd->device |= (lba >> 24) & 0x0f;
	ata_cmd->command = command;
	ata_cmd->control = control;

	cam_fill_csio(csio, retries, cbfcnp, flags, tag_action, data_ptr,
	    dxfer_len, sense_len, sizeof(*ata_cmd), timeout);
}
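/*
 * Illustrative sketch (not part of the original file): scsi_ata_trim()
 * above only builds the ATA PASS-THROUGH CDB; the caller supplies the
 * DATA SET MANAGEMENT payload in data_ptr.  Per the ATA spec, each
 * 8-byte TRIM range entry is a 48-bit LBA plus a 16-bit sector count,
 * stored little-endian.  The helper name is hypothetical:
 */
static __inline void
example_dsm_trim_range(u_int8_t *entry, u_int64_t lba, u_int16_t nsectors)
{
	int i;

	for (i = 0; i < 6; i++)
		entry[i] = (lba >> (8 * i)) & 0xff;	/* LBA, LSB first */
	entry[6] = nsectors & 0xff;
	entry[7] = (nsectors >> 8) & 0xff;
}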
void
scsi_unmap(struct ccb_scsiio *csio, u_int32_t retries,
	   void (*cbfcnp)(struct cam_periph *, union ccb *),
	   u_int8_t tag_action, u_int8_t byte2, u_int8_t *data_ptr,
	   u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_unmap *scsi_cmd;

	scsi_cmd = (struct scsi_unmap *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = UNMAP;
	scsi_cmd->byte2 = byte2;
	scsi_ulto4b(0, scsi_cmd->reserved);
	scsi_cmd->group = 0;
	scsi_ulto2b(dxfer_len, scsi_cmd->length);
	scsi_cmd->control = 0;

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT,
	    tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd),
	    timeout);
}

void
scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries,
				void (*cbfcnp)(struct cam_periph *,
					       union ccb*),
				uint8_t tag_action, int pcv,
				uint8_t page_code, uint8_t *data_ptr,
				uint16_t allocation_length,
				uint8_t sense_len, uint32_t timeout)
{
	struct scsi_receive_diag *scsi_cmd;

	scsi_cmd = (struct scsi_receive_diag *)&csio->cdb_io.cdb_bytes;
	memset(scsi_cmd, 0, sizeof(*scsi_cmd));
	scsi_cmd->opcode = RECEIVE_DIAGNOSTIC;
	if (pcv) {
		scsi_cmd->byte2 |= SRD_PCV;
		scsi_cmd->page_code = page_code;
	}
	scsi_ulto2b(allocation_length, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, data_ptr, allocation_length, sense_len,
	    sizeof(*scsi_cmd), timeout);
}

void
scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries,
		     void (*cbfcnp)(struct cam_periph *, union ccb *),
		     uint8_t tag_action, int unit_offline,
		     int device_offline, int self_test, int page_format,
		     int self_test_code, uint8_t *data_ptr,
		     uint16_t param_list_length, uint8_t sense_len,
		     uint32_t timeout)
{
	struct scsi_send_diag *scsi_cmd;

	scsi_cmd = (struct scsi_send_diag *)&csio->cdb_io.cdb_bytes;
	memset(scsi_cmd, 0, sizeof(*scsi_cmd));
	scsi_cmd->opcode = SEND_DIAGNOSTIC;

	/*
	 * The default self-test mode control and specific test
	 * control are mutually exclusive.
	 */
	if (self_test)
		self_test_code = SSD_SELF_TEST_CODE_NONE;

	scsi_cmd->byte2 = ((self_test_code << SSD_SELF_TEST_CODE_SHIFT)
			 & SSD_SELF_TEST_CODE_MASK)
			| (unit_offline   ? SSD_UNITOFFL : 0)
			| (device_offline ? SSD_DEVOFFL  : 0)
			| (self_test      ? SSD_SELFTEST : 0)
			| (page_format    ? SSD_PF       : 0);
	scsi_ulto2b(param_list_length, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp,
	    /*flags*/param_list_length ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action, data_ptr, param_list_length, sense_len,
	    sizeof(*scsi_cmd), timeout);
}
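/*
 * Illustrative sketch (not part of the original file): scsi_unmap()
 * above transfers a caller-built parameter list consisting of a
 * struct scsi_unmap_header followed by one or more struct
 * scsi_unmap_desc entries.  Filling a single-range list might look
 * like this (helper name is hypothetical):
 */
static __inline void
example_unmap_single_range(uint8_t *buf, uint64_t lba, uint32_t nblocks)
{
	struct scsi_unmap_header *hdr;
	struct scsi_unmap_desc *desc;

	hdr = (struct scsi_unmap_header *)buf;
	desc = (struct scsi_unmap_desc *)(hdr + 1);
	bzero(buf, sizeof(*hdr) + sizeof(*desc));
	scsi_ulto2b(sizeof(*desc) + 6, hdr->length);	/* bytes after this field */
	scsi_ulto2b(sizeof(*desc), hdr->desc_length);	/* descriptor bytes */
	scsi_u64to8b(lba, desc->lba);
	scsi_ulto4b(nblocks, desc->length);
}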
void
scsi_read_buffer(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb*),
		 uint8_t tag_action, int mode, uint8_t buffer_id,
		 u_int32_t offset, uint8_t *data_ptr,
		 uint32_t allocation_length, uint8_t sense_len,
		 uint32_t timeout)
{
	struct scsi_read_buffer *scsi_cmd;

	scsi_cmd = (struct scsi_read_buffer *)&csio->cdb_io.cdb_bytes;
	memset(scsi_cmd, 0, sizeof(*scsi_cmd));
	scsi_cmd->opcode = READ_BUFFER;
	scsi_cmd->byte2 = mode;
	scsi_cmd->buffer_id = buffer_id;
	scsi_ulto3b(offset, scsi_cmd->offset);
	scsi_ulto3b(allocation_length, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, data_ptr, allocation_length, sense_len,
	    sizeof(*scsi_cmd), timeout);
}

void
scsi_write_buffer(struct ccb_scsiio *csio, u_int32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  uint8_t tag_action, int mode, uint8_t buffer_id,
		  u_int32_t offset, uint8_t *data_ptr,
		  uint32_t param_list_length, uint8_t sense_len,
		  uint32_t timeout)
{
	struct scsi_write_buffer *scsi_cmd;

	scsi_cmd = (struct scsi_write_buffer *)&csio->cdb_io.cdb_bytes;
	memset(scsi_cmd, 0, sizeof(*scsi_cmd));
	scsi_cmd->opcode = WRITE_BUFFER;
	scsi_cmd->byte2 = mode;
	scsi_cmd->buffer_id = buffer_id;
	scsi_ulto3b(offset, scsi_cmd->offset);
	scsi_ulto3b(param_list_length, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp,
	    /*flags*/param_list_length ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action, data_ptr, param_list_length, sense_len,
	    sizeof(*scsi_cmd), timeout);
}

void
scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
		void (*cbfcnp)(struct cam_periph *, union ccb *),
		u_int8_t tag_action, int start, int load_eject,
		int immediate, u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_start_stop_unit *scsi_cmd;
	int extra_flags = 0;

	scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = START_STOP_UNIT;
	if (start != 0) {
		scsi_cmd->how |= SSS_START;
		/* it takes a lot of power to start a drive */
		extra_flags |= CAM_HIGH_POWER;
	}
	if (load_eject != 0)
		scsi_cmd->how |= SSS_LOEJ;
	if (immediate != 0)
		scsi_cmd->byte2 |= SSS_IMMED;

	cam_fill_csio(csio, retries, cbfcnp,
	    /*flags*/CAM_DIR_NONE | extra_flags, tag_action,
	    /*data_ptr*/NULL, /*dxfer_len*/0, sense_len,
	    sizeof(*scsi_cmd), timeout);
}
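/*
 * Usage sketch (hypothetical caller, not part of the original file):
 * spinning a disk down with scsi_start_stop() above.  MSG_ORDERED_Q_TAG
 * comes from scsi_message.h, SSD_FULL_SIZE from scsi_all.h, and the
 * timeout is an arbitrary example value.
 */
static __inline void
example_spin_down(struct ccb_scsiio *csio,
		  void (*done)(struct cam_periph *, union ccb *))
{
	scsi_start_stop(csio, /*retries*/1, done, MSG_ORDERED_Q_TAG,
			/*start*/0, /*load_eject*/0, /*immediate*/0,
			SSD_FULL_SIZE, /*timeout*/50 * 1000);
}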
void
scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries,
		    void (*cbfcnp)(struct cam_periph *, union ccb *),
		    u_int8_t tag_action, u_int8_t service_action,
		    uint32_t element, u_int8_t elem_type,
		    int logical_volume, int partition,
		    u_int32_t first_attribute, int cache, u_int8_t *data_ptr,
		    u_int32_t length, int sense_len, u_int32_t timeout)
{
	struct scsi_read_attribute *scsi_cmd;

	scsi_cmd = (struct scsi_read_attribute *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = READ_ATTRIBUTE;
	scsi_cmd->service_action = service_action;
	scsi_ulto2b(element, scsi_cmd->element);
	scsi_cmd->elem_type = elem_type;
	scsi_cmd->logical_volume = logical_volume;
	scsi_cmd->partition = partition;
	scsi_ulto2b(first_attribute, scsi_cmd->first_attribute);
	scsi_ulto4b(length, scsi_cmd->length);
	if (cache != 0)
		scsi_cmd->cache |= SRA_CACHE;

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length,
	    sense_len, sizeof(*scsi_cmd), timeout);
}

void
scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries,
		     void (*cbfcnp)(struct cam_periph *, union ccb *),
		     u_int8_t tag_action, uint32_t element,
		     int logical_volume, int partition, int wtc,
		     u_int8_t *data_ptr, u_int32_t length, int sense_len,
		     u_int32_t timeout)
{
	struct scsi_write_attribute *scsi_cmd;

	scsi_cmd = (struct scsi_write_attribute *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = WRITE_ATTRIBUTE;
	if (wtc != 0)
		scsi_cmd->byte2 = SWA_WTC;
	scsi_ulto3b(element, scsi_cmd->element);
	scsi_cmd->logical_volume = logical_volume;
	scsi_cmd->partition = partition;
	scsi_ulto4b(length, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT,
	    tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length,
	    sense_len, sizeof(*scsi_cmd), timeout);
}

void
scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries,
			   void (*cbfcnp)(struct cam_periph *, union ccb *),
			   uint8_t tag_action, int service_action,
			   uint8_t *data_ptr, uint32_t dxfer_len,
			   int sense_len, int timeout)
{
	struct scsi_per_res_in *scsi_cmd;

	scsi_cmd = (struct scsi_per_res_in *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = PERSISTENT_RES_IN;
	scsi_cmd->action = service_action;
	scsi_ulto2b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd),
	    timeout);
}

void
scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries,
			    void (*cbfcnp)(struct cam_periph *,
					   union ccb *),
			    uint8_t tag_action, int service_action,
			    int scope, int res_type, uint8_t *data_ptr,
			    uint32_t dxfer_len, int sense_len, int timeout)
{
	struct scsi_per_res_out *scsi_cmd;

	scsi_cmd = (struct scsi_per_res_out *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = PERSISTENT_RES_OUT;
	scsi_cmd->action = service_action;
	scsi_cmd->scope_type = scope | res_type;
	/* The CDB must also carry the parameter list length. */
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT,
	    tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/dxfer_len,
	    sense_len, sizeof(*scsi_cmd), timeout);
}

void
scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries,
			  void (*cbfcnp)(struct cam_periph *, union ccb *),
			  uint8_t tag_action, uint32_t security_protocol,
			  uint32_t security_protocol_specific, int byte4,
			  uint8_t *data_ptr, uint32_t dxfer_len,
			  int sense_len, int timeout)
{
	struct scsi_security_protocol_in *scsi_cmd;

	scsi_cmd = (struct scsi_security_protocol_in *)
	    &csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = SECURITY_PROTOCOL_IN;
	scsi_cmd->security_protocol = security_protocol;
	scsi_ulto2b(security_protocol_specific,
		    scsi_cmd->security_protocol_specific);
	scsi_cmd->byte4 = byte4;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd),
	    timeout);
}

void
scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries,
			   void (*cbfcnp)(struct cam_periph *, union ccb *),
			   uint8_t tag_action, uint32_t security_protocol,
			   uint32_t security_protocol_specific, int byte4,
			   uint8_t *data_ptr, uint32_t dxfer_len,
			   int sense_len, int timeout)
{
	struct scsi_security_protocol_out *scsi_cmd;

	scsi_cmd = (struct scsi_security_protocol_out *)
	    &csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = SECURITY_PROTOCOL_OUT;
	scsi_cmd->security_protocol = security_protocol;
	scsi_ulto2b(security_protocol_specific,
		    scsi_cmd->security_protocol_specific);
	scsi_cmd->byte4 = byte4;
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT,
	    tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd),
	    timeout);
}
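/*
 * Illustrative sketch (not part of the original file): decoding the
 * payload returned by scsi_persistent_reserve_in() above for the
 * READ KEYS service action.  The additional length in the header is
 * big-endian and counts the reservation-key bytes that follow it; the
 * helper name is hypothetical.
 */
static __inline u_int32_t
example_pr_in_key_count(struct scsi_per_res_in_keys *keys)
{
	return (scsi_4btoul(keys->header.length) /
	    sizeof(struct scsi_per_res_key));
}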
void
scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries,
			      void (*cbfcnp)(struct cam_periph *,
					     union ccb *),
			      uint8_t tag_action, int options,
			      int req_opcode, int req_service_action,
			      uint8_t *data_ptr, uint32_t dxfer_len,
			      int sense_len, int timeout)
{
	struct scsi_report_supported_opcodes *scsi_cmd;

	scsi_cmd = (struct scsi_report_supported_opcodes *)
	    &csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = MAINTENANCE_IN;
	scsi_cmd->service_action = REPORT_SUPPORTED_OPERATION_CODES;
	scsi_cmd->options = options;
	scsi_cmd->requested_opcode = req_opcode;
	scsi_ulto2b(req_service_action, scsi_cmd->requested_service_action);
	scsi_ulto4b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN,
	    tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd),
	    timeout);
}

/*
 * Try to make as good a match as possible with
 * available sub drivers
 */
int
scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
{
	struct scsi_inquiry_pattern *entry;
	struct scsi_inquiry_data *inq;

	entry = (struct scsi_inquiry_pattern *)table_entry;
	inq = (struct scsi_inquiry_data *)inqbuffer;
	if (((SID_TYPE(inq) == entry->type)
	  || (entry->type == T_ANY))
	 && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
				   : entry->media_type & SIP_MEDIA_FIXED)
	 && (cam_strmatch(inq->vendor, entry->vendor,
			  sizeof(inq->vendor)) == 0)
	 && (cam_strmatch(inq->product, entry->product,
			  sizeof(inq->product)) == 0)
	 && (cam_strmatch(inq->revision, entry->revision,
			  sizeof(inq->revision)) == 0)) {
		return (0);
	}
	return (-1);
}

/*
 * Try to make as good a match as possible with
 * available sub drivers
 */
int
scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
{
	struct scsi_static_inquiry_pattern *entry;
	struct scsi_inquiry_data *inq;

	entry = (struct scsi_static_inquiry_pattern *)table_entry;
	inq = (struct scsi_inquiry_data *)inqbuffer;
	if (((SID_TYPE(inq) == entry->type)
	  || (entry->type == T_ANY))
	 && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
				   : entry->media_type & SIP_MEDIA_FIXED)
	 && (cam_strmatch(inq->vendor, entry->vendor,
			  sizeof(inq->vendor)) == 0)
	 && (cam_strmatch(inq->product, entry->product,
			  sizeof(inq->product)) == 0)
	 && (cam_strmatch(inq->revision, entry->revision,
			  sizeof(inq->revision)) == 0)) {
		return (0);
	}
	return (-1);
}

/**
 * Compare two buffers of vpd device descriptors for a match.
 *
 * \param lhs      Pointer to first buffer of descriptors to compare.
 * \param lhs_len  The length of the first buffer.
 * \param rhs      Pointer to second buffer of descriptors to compare.
 * \param rhs_len  The length of the second buffer.
 *
 * \return  0 on a match, -1 otherwise.
 *
 * Treat rhs and lhs as arrays of vpd device id descriptors.  Walk lhs
 * matching against each element in rhs until all data are exhausted or we
 * have found a match.
 */
int
scsi_devid_match(uint8_t *lhs, size_t lhs_len, uint8_t *rhs, size_t rhs_len)
{
	struct scsi_vpd_id_descriptor *lhs_id;
	struct scsi_vpd_id_descriptor *lhs_last;
	struct scsi_vpd_id_descriptor *rhs_last;
	uint8_t *lhs_end;
	uint8_t *rhs_end;

	lhs_end = lhs + lhs_len;
	rhs_end = rhs + rhs_len;

	/*
	 * rhs_last and lhs_last are the last possible position of a valid
	 * descriptor assuming it had a zero length identifier.  We use
	 * these variables to ensure we can safely dereference the length
	 * field in our loop termination tests.
*/ lhs_last = (struct scsi_vpd_id_descriptor *) (lhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier)); rhs_last = (struct scsi_vpd_id_descriptor *) (rhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier)); lhs_id = (struct scsi_vpd_id_descriptor *)lhs; while (lhs_id <= lhs_last && (lhs_id->identifier + lhs_id->length) <= lhs_end) { struct scsi_vpd_id_descriptor *rhs_id; rhs_id = (struct scsi_vpd_id_descriptor *)rhs; while (rhs_id <= rhs_last && (rhs_id->identifier + rhs_id->length) <= rhs_end) { if ((rhs_id->id_type & (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) == (lhs_id->id_type & (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) && rhs_id->length == lhs_id->length && memcmp(rhs_id->identifier, lhs_id->identifier, rhs_id->length) == 0) return (0); rhs_id = (struct scsi_vpd_id_descriptor *) (rhs_id->identifier + rhs_id->length); } lhs_id = (struct scsi_vpd_id_descriptor *) (lhs_id->identifier + lhs_id->length); } return (-1); } #ifdef _KERNEL int scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id) { struct cam_ed *device; struct scsi_vpd_supported_pages *vpds; int i, num_pages; device = periph->path->device; vpds = (struct scsi_vpd_supported_pages *)device->supported_vpds; if (vpds != NULL) { num_pages = device->supported_vpds_len - SVPD_SUPPORTED_PAGES_HDR_LEN; for (i = 0; i < num_pages; i++) { if (vpds->page_list[i] == page_id) return (1); } } return (0); } static void init_scsi_delay(void) { int delay; delay = SCSI_DELAY; TUNABLE_INT_FETCH("kern.cam.scsi_delay", &delay); if (set_scsi_delay(delay) != 0) { printf("cam: invalid value for tunable kern.cam.scsi_delay\n"); set_scsi_delay(SCSI_DELAY); } } SYSINIT(scsi_delay, SI_SUB_TUNABLES, SI_ORDER_ANY, init_scsi_delay, NULL); static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS) { int error, delay; delay = scsi_delay; error = sysctl_handle_int(oidp, &delay, 0, req); if (error != 0 || req->newptr == NULL) return (error); return (set_scsi_delay(delay)); } SYSCTL_PROC(_kern_cam, OID_AUTO, scsi_delay, CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_scsi_delay, "I", "Delay to allow devices to settle after a SCSI bus reset (ms)"); static int set_scsi_delay(int delay) { /* * If someone sets this to 0, we assume that they want the * minimum allowable bus settle delay. */ if (delay == 0) { printf("cam: using minimum scsi_delay (%dms)\n", SCSI_MIN_DELAY); delay = SCSI_MIN_DELAY; } if (delay < SCSI_MIN_DELAY) return (EINVAL); scsi_delay = delay; return (0); } #endif /* _KERNEL */ Index: projects/release-pkg/sys/cam/scsi/scsi_all.h =================================================================== --- projects/release-pkg/sys/cam/scsi/scsi_all.h (revision 297926) +++ projects/release-pkg/sys/cam/scsi/scsi_all.h (revision 297927) @@ -1,4188 +1,4189 @@ /*- * Largely written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. 
 *
 * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
 *
 * $FreeBSD$
 */

/*
 * SCSI general interface description
 */

#ifndef	_SCSI_SCSI_ALL_H
#define	_SCSI_SCSI_ALL_H 1

#include
#include

#ifdef _KERNEL
/*
 * This is the number of seconds we wait for devices to settle after a SCSI
 * bus reset.
 */
extern int scsi_delay;
#endif /* _KERNEL */

/*
 * SCSI command format
 */

/*
 * Define some bits that are in ALL (or a lot of) scsi commands
 */
#define	SCSI_CTL_LINK		0x01
#define	SCSI_CTL_FLAG		0x02
#define	SCSI_CTL_VENDOR		0xC0
#define	SCSI_CMD_LUN		0xA0	/* these two should not be needed */
#define	SCSI_CMD_LUN_SHIFT	5	/* LUN in the cmd is no longer SCSI */

#define	SCSI_MAX_CDBLEN		16	/*
					 * 16 byte commands are in the
					 * SCSI-3 spec
					 */
#if defined(CAM_MAX_CDBLEN) && (CAM_MAX_CDBLEN < SCSI_MAX_CDBLEN)
#error "CAM_MAX_CDBLEN cannot be less than SCSI_MAX_CDBLEN"
#endif

/* 6byte CDBs special case 0 length to be 256 */
#define	SCSI_CDB6_LEN(len)	((len) == 0 ? 256 : len)

/*
 * This type defines actions to be taken when a particular sense code is
 * received.  Right now, these flags are only defined to take up 16 bits,
 * but can be expanded in the future if necessary.
 */
typedef enum {
	SS_NOP	 = 0x000000, /* Do nothing */
	SS_RETRY = 0x010000, /* Retry the command */
	SS_FAIL	 = 0x020000, /* Bail out */
	SS_START = 0x030000, /* Send a Start Unit command to the device,
			      * then retry the original command.
			      */
	SS_TUR	 = 0x040000, /* Send a Test Unit Ready command to the
			      * device, then retry the original command.
			      */
	SS_MASK	 = 0xff0000
} scsi_sense_action;

typedef enum {
	SSQ_NONE		= 0x0000,
	SSQ_DECREMENT_COUNT	= 0x0100, /* Decrement the retry count */
	SSQ_MANY		= 0x0200, /* send lots of recovery commands */
	SSQ_RANGE		= 0x0400, /*
					   * This table entry represents the
					   * end of a range of ASCQs that
					   * have identical error actions
					   * and text.
					   */
	SSQ_PRINT_SENSE		= 0x0800,
	SSQ_UA			= 0x1000, /* Broadcast UA. */
	SSQ_RESCAN		= 0x2000, /* Rescan target for LUNs. */
	SSQ_LOST		= 0x4000, /* Destroy the LUNs. */
	SSQ_MASK		= 0xff00
} scsi_sense_action_qualifier;
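/*
 * Illustrative sketch (hypothetical value, not part of the original
 * header): entries in CAM's ASC/ASCQ tables OR one scsi_sense_action,
 * any qualifiers, and an errno value (low byte) into a single word,
 * e.g. a printed, non-retried fatal action.  ENXIO is assumed to come
 * from the errno header.
 */
#define	EXAMPLE_SENSE_ACTION	(SS_FAIL | SSQ_PRINT_SENSE | ENXIO)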
/* Mask for error status values */
#define	SS_ERRMASK	0xff

/* The default, retryable, error action */
#define	SS_RDEF		SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO

/* The retryable, error action, with table specified error code */
#define	SS_RET		SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE

/* Wait for transient error status to change */
#define	SS_WAIT		SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE

/* Fatal error action, with table specified error code */
#define	SS_FATAL	SS_FAIL|SSQ_PRINT_SENSE

struct scsi_generic {
	u_int8_t opcode;
	u_int8_t bytes[11];
};

struct scsi_request_sense {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SRS_DESC	0x01
	u_int8_t unused[2];
	u_int8_t length;
	u_int8_t control;
};

struct scsi_test_unit_ready {
	u_int8_t opcode;
	u_int8_t byte2;
	u_int8_t unused[3];
	u_int8_t control;
};

struct scsi_receive_diag {
	uint8_t opcode;
	uint8_t byte2;
#define	SRD_PCV		0x01
	uint8_t page_code;
	uint8_t length[2];
	uint8_t control;
};

struct scsi_send_diag {
	uint8_t opcode;
	uint8_t byte2;
#define	SSD_UNITOFFL			0x01
#define	SSD_DEVOFFL			0x02
#define	SSD_SELFTEST			0x04
#define	SSD_PF				0x10
#define	SSD_SELF_TEST_CODE_MASK		0xE0
#define	SSD_SELF_TEST_CODE_SHIFT	5
#define	SSD_SELF_TEST_CODE_NONE		0x00
#define	SSD_SELF_TEST_CODE_BG_SHORT	0x01
#define	SSD_SELF_TEST_CODE_BG_EXTENDED	0x02
#define	SSD_SELF_TEST_CODE_BG_ABORT	0x04
#define	SSD_SELF_TEST_CODE_FG_SHORT	0x05
#define	SSD_SELF_TEST_CODE_FG_EXTENDED	0x06
	uint8_t reserved;
	uint8_t length[2];
	uint8_t control;
};

struct scsi_sense {
	u_int8_t opcode;
	u_int8_t byte2;
	u_int8_t unused[2];
	u_int8_t length;
	u_int8_t control;
};

struct scsi_inquiry {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SI_EVPD		0x01
#define	SI_CMDDT	0x02
	u_int8_t page_code;
	u_int8_t length[2];
	u_int8_t control;
};

struct scsi_mode_sense_6 {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SMS_DBD				0x08
	u_int8_t page;
#define	SMS_PAGE_CODE			0x3F
#define	SMS_VENDOR_SPECIFIC_PAGE	0x00
#define	SMS_DISCONNECT_RECONNECT_PAGE	0x02
#define	SMS_FORMAT_DEVICE_PAGE		0x03
#define	SMS_GEOMETRY_PAGE		0x04
#define	SMS_CACHE_PAGE			0x08
#define	SMS_PERIPHERAL_DEVICE_PAGE	0x09
#define	SMS_CONTROL_MODE_PAGE		0x0A
#define	SMS_PROTO_SPECIFIC_PAGE		0x19
#define	SMS_INFO_EXCEPTIONS_PAGE	0x1C
#define	SMS_ALL_PAGES_PAGE		0x3F
#define	SMS_PAGE_CTRL_MASK		0xC0
#define	SMS_PAGE_CTRL_CURRENT		0x00
#define	SMS_PAGE_CTRL_CHANGEABLE	0x40
#define	SMS_PAGE_CTRL_DEFAULT		0x80
#define	SMS_PAGE_CTRL_SAVED		0xC0
	u_int8_t subpage;
#define	SMS_SUBPAGE_PAGE_0		0x00
#define	SMS_SUBPAGE_ALL			0xff
	u_int8_t length;
	u_int8_t control;
};

struct scsi_mode_sense_10 {
	u_int8_t opcode;
	u_int8_t byte2;		/* same bits as small version */
#define	SMS10_LLBAA			0x10
	u_int8_t page;		/* same bits as small version */
	u_int8_t subpage;
	u_int8_t unused[3];
	u_int8_t length[2];
	u_int8_t control;
};

struct scsi_mode_select_6 {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SMS_SP	0x01
#define	SMS_PF	0x10
	u_int8_t unused[2];
	u_int8_t length;
	u_int8_t control;
};

struct scsi_mode_select_10 {
	u_int8_t opcode;
	u_int8_t byte2;		/* same bits as small version */
	u_int8_t unused[5];
	u_int8_t length[2];
	u_int8_t control;
};

/*
 * When sending a mode select to a tape drive, the medium type must be 0.
*/ struct scsi_mode_hdr_6 { u_int8_t datalen; u_int8_t medium_type; u_int8_t dev_specific; u_int8_t block_descr_len; }; struct scsi_mode_hdr_10 { u_int8_t datalen[2]; u_int8_t medium_type; u_int8_t dev_specific; u_int8_t reserved[2]; u_int8_t block_descr_len[2]; }; struct scsi_mode_block_descr { u_int8_t density_code; u_int8_t num_blocks[3]; u_int8_t reserved; u_int8_t block_len[3]; }; struct scsi_per_res_in { u_int8_t opcode; u_int8_t action; #define SPRI_RK 0x00 #define SPRI_RR 0x01 #define SPRI_RC 0x02 #define SPRI_RS 0x03 u_int8_t reserved[5]; u_int8_t length[2]; #define SPRI_MAX_LEN 0xffff u_int8_t control; }; struct scsi_per_res_in_header { u_int8_t generation[4]; u_int8_t length[4]; }; struct scsi_per_res_key { u_int8_t key[8]; }; struct scsi_per_res_in_keys { struct scsi_per_res_in_header header; struct scsi_per_res_key keys[0]; }; struct scsi_per_res_cap { uint8_t length[2]; uint8_t flags1; #define SPRI_RLR_C 0x80 #define SPRI_CRH 0x10 #define SPRI_SIP_C 0x08 #define SPRI_ATP_C 0x04 #define SPRI_PTPL_C 0x01 uint8_t flags2; #define SPRI_TMV 0x80 #define SPRI_ALLOW_CMD_MASK 0x70 #define SPRI_ALLOW_CMD_SHIFT 4 #define SPRI_ALLOW_NA 0x00 #define SPRI_ALLOW_1 0x10 #define SPRI_ALLOW_2 0x20 #define SPRI_ALLOW_3 0x30 #define SPRI_ALLOW_4 0x40 #define SPRI_ALLOW_5 0x50 #define SPRI_PTPL_A 0x01 uint8_t type_mask[2]; #define SPRI_TM_WR_EX_AR 0x8000 #define SPRI_TM_EX_AC_RO 0x4000 #define SPRI_TM_WR_EX_RO 0x2000 #define SPRI_TM_EX_AC 0x0800 #define SPRI_TM_WR_EX 0x0200 #define SPRI_TM_EX_AC_AR 0x0001 uint8_t reserved[2]; }; struct scsi_per_res_in_rsrv_data { uint8_t reservation[8]; uint8_t scope_addr[4]; uint8_t reserved; uint8_t scopetype; #define SPRT_WE 0x01 #define SPRT_EA 0x03 #define SPRT_WERO 0x05 #define SPRT_EARO 0x06 #define SPRT_WEAR 0x07 #define SPRT_EAAR 0x08 uint8_t extent_length[2]; }; struct scsi_per_res_in_rsrv { struct scsi_per_res_in_header header; struct scsi_per_res_in_rsrv_data data; }; struct scsi_per_res_in_full_desc { struct scsi_per_res_key res_key; uint8_t reserved1[4]; uint8_t flags; #define SPRI_FULL_ALL_TG_PT 0x02 #define SPRI_FULL_R_HOLDER 0x01 uint8_t scopetype; uint8_t reserved2[4]; uint8_t rel_trgt_port_id[2]; uint8_t additional_length[4]; uint8_t transport_id[]; }; struct scsi_per_res_in_full { struct scsi_per_res_in_header header; struct scsi_per_res_in_full_desc desc[]; }; struct scsi_per_res_out { u_int8_t opcode; u_int8_t action; #define SPRO_REGISTER 0x00 #define SPRO_RESERVE 0x01 #define SPRO_RELEASE 0x02 #define SPRO_CLEAR 0x03 #define SPRO_PREEMPT 0x04 #define SPRO_PRE_ABO 0x05 #define SPRO_REG_IGNO 0x06 #define SPRO_REG_MOVE 0x07 #define SPRO_REPL_LOST_RES 0x08 #define SPRO_ACTION_MASK 0x1f u_int8_t scope_type; #define SPR_SCOPE_MASK 0xf0 #define SPR_SCOPE_SHIFT 4 #define SPR_LU_SCOPE 0x00 #define SPR_EXTENT_SCOPE 0x10 #define SPR_ELEMENT_SCOPE 0x20 #define SPR_TYPE_MASK 0x0f #define SPR_TYPE_RD_SHARED 0x00 #define SPR_TYPE_WR_EX 0x01 #define SPR_TYPE_RD_EX 0x02 #define SPR_TYPE_EX_AC 0x03 #define SPR_TYPE_SHARED 0x04 #define SPR_TYPE_WR_EX_RO 0x05 #define SPR_TYPE_EX_AC_RO 0x06 #define SPR_TYPE_WR_EX_AR 0x07 #define SPR_TYPE_EX_AC_AR 0x08 u_int8_t reserved[2]; u_int8_t length[4]; u_int8_t control; }; struct scsi_per_res_out_parms { struct scsi_per_res_key res_key; u_int8_t serv_act_res_key[8]; u_int8_t scope_spec_address[4]; u_int8_t flags; #define SPR_SPEC_I_PT 0x08 #define SPR_ALL_TG_PT 0x04 #define SPR_APTPL 0x01 u_int8_t reserved1; u_int8_t extent_length[2]; u_int8_t transport_id_list[]; }; struct scsi_per_res_out_trans_ids { u_int8_t 
	additional_length[4];
	u_int8_t transport_ids[];
};

/*
 * Used with the REGISTER AND MOVE service action of the PERSISTENT
 * RESERVE OUT command.
 */
struct scsi_per_res_reg_move {
	struct scsi_per_res_key res_key;
	u_int8_t serv_act_res_key[8];
	u_int8_t reserved;
	u_int8_t flags;
#define	SPR_REG_MOVE_UNREG	0x02
#define	SPR_REG_MOVE_APTPL	0x01
	u_int8_t rel_trgt_port_id[2];
	u_int8_t transport_id_length[4];
	u_int8_t transport_id[];
};

struct scsi_transportid_header {
	uint8_t format_protocol;
#define	SCSI_TRN_FORMAT_MASK		0xc0
#define	SCSI_TRN_FORMAT_SHIFT		6
#define	SCSI_TRN_PROTO_MASK		0x0f
};

struct scsi_transportid_fcp {
	uint8_t format_protocol;
#define	SCSI_TRN_FCP_FORMAT_DEFAULT	0x00
	uint8_t reserved1[7];
	uint8_t n_port_name[8];
	uint8_t reserved2[8];
};

struct scsi_transportid_spi {
	uint8_t format_protocol;
#define	SCSI_TRN_SPI_FORMAT_DEFAULT	0x00
	uint8_t reserved1;
	uint8_t scsi_addr[2];
	uint8_t obsolete[2];
	uint8_t rel_trgt_port_id[2];
	uint8_t reserved2[16];
};

struct scsi_transportid_1394 {
	uint8_t format_protocol;
#define	SCSI_TRN_1394_FORMAT_DEFAULT	0x00
	uint8_t reserved1[7];
	uint8_t eui64[8];
	uint8_t reserved2[8];
};

struct scsi_transportid_rdma {
	uint8_t format_protocol;
#define	SCSI_TRN_RDMA_FORMAT_DEFAULT	0x00
	uint8_t reserved[7];
#define	SCSI_TRN_RDMA_PORT_LEN		16
	uint8_t initiator_port_id[SCSI_TRN_RDMA_PORT_LEN];
};

struct scsi_transportid_iscsi_device {
	uint8_t format_protocol;
#define	SCSI_TRN_ISCSI_FORMAT_DEVICE	0x00
	uint8_t reserved;
	uint8_t additional_length[2];
	uint8_t iscsi_name[];
};

struct scsi_transportid_iscsi_port {
	uint8_t format_protocol;
#define	SCSI_TRN_ISCSI_FORMAT_PORT	0x40
	uint8_t reserved;
	uint8_t additional_length[2];
	uint8_t iscsi_name[];
	/*
	 * Followed by a separator and iSCSI initiator session ID
	 */
};

struct scsi_transportid_sas {
	uint8_t format_protocol;
#define	SCSI_TRN_SAS_FORMAT_DEFAULT	0x00
	uint8_t reserved1[3];
	uint8_t sas_address[8];
	uint8_t reserved2[12];
};

struct scsi_sop_routing_id_norm {
	uint8_t bus;
	uint8_t devfunc;
#define	SCSI_TRN_SOP_BUS_MAX		0xff
#define	SCSI_TRN_SOP_DEV_MAX		0x1f
#define	SCSI_TRN_SOP_DEV_MASK		0xf8
#define	SCSI_TRN_SOP_DEV_SHIFT		3
#define	SCSI_TRN_SOP_FUNC_NORM_MASK	0x07
#define	SCSI_TRN_SOP_FUNC_NORM_MAX	0x07
};

struct scsi_sop_routing_id_alt {
	uint8_t bus;
	uint8_t function;
#define	SCSI_TRN_SOP_FUNC_ALT_MAX	0xff
};

struct scsi_transportid_sop {
	uint8_t format_protocol;
#define	SCSI_TRN_SOP_FORMAT_DEFAULT	0x00
	uint8_t reserved1;
	uint8_t routing_id[2];
	uint8_t reserved2[20];
};

struct scsi_log_sense {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SLS_SP				0x01
#define	SLS_PPC				0x02
	u_int8_t page;
#define	SLS_PAGE_CODE			0x3F
#define	SLS_SUPPORTED_PAGES_PAGE	0x00
#define	SLS_OVERRUN_PAGE		0x01
#define	SLS_ERROR_WRITE_PAGE		0x02
#define	SLS_ERROR_READ_PAGE		0x03
#define	SLS_ERROR_READREVERSE_PAGE	0x04
#define	SLS_ERROR_VERIFY_PAGE		0x05
#define	SLS_ERROR_NONMEDIUM_PAGE	0x06
#define	SLS_ERROR_LASTN_PAGE		0x07
#define	SLS_LOGICAL_BLOCK_PROVISIONING	0x0c
#define	SLS_SELF_TEST_PAGE		0x10
#define	SLS_STAT_AND_PERF		0x19
#define	SLS_IE_PAGE			0x2f
#define	SLS_PAGE_CTRL_MASK		0xC0
#define	SLS_PAGE_CTRL_THRESHOLD		0x00
#define	SLS_PAGE_CTRL_CUMULATIVE	0x40
#define	SLS_PAGE_CTRL_THRESH_DEFAULT	0x80
#define	SLS_PAGE_CTRL_CUMUL_DEFAULT	0xC0
	u_int8_t subpage;
#define	SLS_SUPPORTED_SUBPAGES_SUBPAGE	0xff
	u_int8_t reserved;
	u_int8_t paramptr[2];
	u_int8_t length[2];
	u_int8_t control;
};

struct scsi_log_select {
	u_int8_t opcode;
	u_int8_t byte2;
/*	SLS_SP				0x01 */
#define	SLS_PCR				0x02
	u_int8_t page;
/*	SLS_PAGE_CTRL_MASK		0xC0 */
/*	SLS_PAGE_CTRL_THRESHOLD		0x00 */
/*
SLS_PAGE_CTRL_CUMULATIVE 0x40 */ /* SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 */ /* SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 */ u_int8_t reserved[4]; u_int8_t length[2]; u_int8_t control; }; struct scsi_log_header { u_int8_t page; #define SL_PAGE_CODE 0x3F #define SL_SPF 0x40 #define SL_DS 0x80 u_int8_t subpage; u_int8_t datalen[2]; }; struct scsi_log_param_header { u_int8_t param_code[2]; u_int8_t param_control; #define SLP_LP 0x01 #define SLP_LBIN 0x02 #define SLP_TMC_MASK 0x0C #define SLP_TMC_ALWAYS 0x00 #define SLP_TMC_EQUAL 0x04 #define SLP_TMC_NOTEQUAL 0x08 #define SLP_TMC_GREATER 0x0C #define SLP_ETC 0x10 #define SLP_TSD 0x20 #define SLP_DS 0x40 #define SLP_DU 0x80 u_int8_t param_len; }; struct scsi_log_stat_and_perf { struct scsi_log_param_header hdr; #define SLP_SAP 0x0001 uint8_t read_num[8]; uint8_t write_num[8]; uint8_t recvieved_lba[8]; uint8_t transmitted_lba[8]; uint8_t read_int[8]; uint8_t write_int[8]; uint8_t weighted_num[8]; uint8_t weighted_int[8]; }; struct scsi_log_idle_time { struct scsi_log_param_header hdr; #define SLP_IT 0x0002 uint8_t idle_int[8]; }; struct scsi_log_time_interval { struct scsi_log_param_header hdr; #define SLP_TI 0x0003 uint8_t exponent[4]; uint8_t integer[4]; }; struct scsi_log_fua_stat_and_perf { struct scsi_log_param_header hdr; #define SLP_FUA_SAP 0x0004 uint8_t fua_read_num[8]; uint8_t fua_write_num[8]; uint8_t fuanv_read_num[8]; uint8_t fuanv_write_num[8]; uint8_t fua_read_int[8]; uint8_t fua_write_int[8]; uint8_t fuanv_read_int[8]; uint8_t fuanv_write_int[8]; }; struct scsi_control_page { u_int8_t page_code; u_int8_t page_length; u_int8_t rlec; #define SCP_RLEC 0x01 /*Report Log Exception Cond*/ #define SCP_GLTSD 0x02 /*Global Logging target save disable */ #define SCP_DSENSE 0x04 /*Descriptor Sense */ #define SCP_DPICZ 0x08 /*Disable Prot. Info Check if Prot. 
Field is Zero */ #define SCP_TMF_ONLY 0x10 /*TM Functions Only*/ #define SCP_TST_MASK 0xE0 /*Task Set Type Mask*/ #define SCP_TST_ONE 0x00 /*One Task Set*/ #define SCP_TST_SEPARATE 0x20 /*Separate Task Sets*/ u_int8_t queue_flags; #define SCP_QUEUE_ALG_MASK 0xF0 #define SCP_QUEUE_ALG_RESTRICTED 0x00 #define SCP_QUEUE_ALG_UNRESTRICTED 0x10 #define SCP_NUAR 0x08 /*No UA on release*/ #define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/ #define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/ u_int8_t eca_and_aen; #define SCP_EECA 0x80 /*Enable Extended CA*/ #define SCP_RAC 0x40 /*Report a check*/ #define SCP_SWP 0x08 /*Software Write Protect*/ #define SCP_RAENP 0x04 /*Ready AEN Permission*/ #define SCP_UAAENP 0x02 /*UA AEN Permission*/ #define SCP_EAENP 0x01 /*Error AEN Permission*/ u_int8_t flags4; #define SCP_ATO 0x80 /*Application tag owner*/ #define SCP_TAS 0x40 /*Task aborted status*/ #define SCP_ATMPE 0x20 /*Application tag mode page*/ #define SCP_RWWP 0x10 /*Reject write without prot*/ u_int8_t aen_holdoff_period[2]; u_int8_t busy_timeout_period[2]; u_int8_t extended_selftest_completion_time[2]; }; struct scsi_control_ext_page { uint8_t page_code; uint8_t subpage_code; uint8_t page_length[2]; uint8_t flags; #define SCEP_TCMOS 0x04 /* Timestamp Changeable by */ #define SCEP_SCSIP 0x02 /* SCSI Precedence (clock) */ #define SCEP_IALUAE 0x01 /* Implicit ALUA Enabled */ uint8_t prio; uint8_t max_sense; uint8_t reserve[25]; }; struct scsi_cache_page { u_int8_t page_code; #define SCHP_PAGE_SAVABLE 0x80 /* Page is savable */ u_int8_t page_length; u_int8_t cache_flags; #define SCHP_FLAGS_WCE 0x04 /* Write Cache Enable */ #define SCHP_FLAGS_MF 0x02 /* Multiplication factor */ #define SCHP_FLAGS_RCD 0x01 /* Read Cache Disable */ u_int8_t rw_cache_policy; u_int8_t dis_prefetch[2]; u_int8_t min_prefetch[2]; u_int8_t max_prefetch[2]; u_int8_t max_prefetch_ceil[2]; }; /* * XXX KDM * Updated version of the cache page, as of SBC. Update this to SBC-3 and * rationalize the two. */ struct scsi_caching_page { uint8_t page_code; #define SMS_CACHING_PAGE 0x08 uint8_t page_length; uint8_t flags1; #define SCP_IC 0x80 #define SCP_ABPF 0x40 #define SCP_CAP 0x20 #define SCP_DISC 0x10 #define SCP_SIZE 0x08 #define SCP_WCE 0x04 #define SCP_MF 0x02 #define SCP_RCD 0x01 uint8_t ret_priority; uint8_t disable_pf_transfer_len[2]; uint8_t min_prefetch[2]; uint8_t max_prefetch[2]; uint8_t max_pf_ceiling[2]; uint8_t flags2; #define SCP_FSW 0x80 #define SCP_LBCSS 0x40 #define SCP_DRA 0x20 #define SCP_VS1 0x10 #define SCP_VS2 0x08 uint8_t cache_segments; uint8_t cache_seg_size[2]; uint8_t reserved; uint8_t non_cache_seg_size[3]; }; /* * XXX KDM move this off to a vendor shim. 
*/ struct copan_debugconf_subpage { uint8_t page_code; #define DBGCNF_PAGE_CODE 0x00 uint8_t subpage; #define DBGCNF_SUBPAGE_CODE 0xF0 uint8_t page_length[2]; uint8_t page_version; #define DBGCNF_VERSION 0x00 uint8_t ctl_time_io_secs[2]; }; struct scsi_info_exceptions_page { u_int8_t page_code; #define SIEP_PAGE_SAVABLE 0x80 /* Page is savable */ u_int8_t page_length; u_int8_t info_flags; #define SIEP_FLAGS_PERF 0x80 #define SIEP_FLAGS_EBF 0x20 #define SIEP_FLAGS_EWASC 0x10 #define SIEP_FLAGS_DEXCPT 0x08 #define SIEP_FLAGS_TEST 0x04 #define SIEP_FLAGS_EBACKERR 0x02 #define SIEP_FLAGS_LOGERR 0x01 u_int8_t mrie; u_int8_t interval_timer[4]; u_int8_t report_count[4]; }; struct scsi_logical_block_provisioning_page_descr { uint8_t flags; #define SLBPPD_ENABLED 0x80 #define SLBPPD_TYPE_MASK 0x38 #define SLBPPD_ARMING_MASK 0x07 #define SLBPPD_ARMING_DEC 0x02 #define SLBPPD_ARMING_INC 0x01 uint8_t resource; uint8_t reserved[2]; uint8_t count[4]; }; struct scsi_logical_block_provisioning_page { uint8_t page_code; uint8_t subpage_code; uint8_t page_length[2]; uint8_t flags; #define SLBPP_SITUA 0x01 uint8_t reserved[11]; struct scsi_logical_block_provisioning_page_descr descr[0]; }; /* * SCSI protocol identifier values, current as of SPC4r36l. */ #define SCSI_PROTO_FC 0x00 /* Fibre Channel */ #define SCSI_PROTO_SPI 0x01 /* Parallel SCSI */ #define SCSI_PROTO_SSA 0x02 /* Serial Storage Arch. */ #define SCSI_PROTO_1394 0x03 /* IEEE 1394 (Firewire) */ #define SCSI_PROTO_RDMA 0x04 /* SCSI RDMA Protocol */ #define SCSI_PROTO_ISCSI 0x05 /* Internet SCSI */ #define SCSI_PROTO_iSCSI 0x05 /* Internet SCSI */ #define SCSI_PROTO_SAS 0x06 /* SAS Serial SCSI Protocol */ #define SCSI_PROTO_ADT 0x07 /* Automation/Drive Int. Trans. Prot.*/ #define SCSI_PROTO_ADITP 0x07 /* Automation/Drive Int. Trans. 
Prot.*/
#define	SCSI_PROTO_ATA		0x08	/* AT Attachment Interface */
#define	SCSI_PROTO_UAS		0x09	/* USB Attached SCSI */
#define	SCSI_PROTO_SOP		0x0a	/* SCSI over PCI Express */
#define	SCSI_PROTO_NONE		0x0f	/* No specific protocol */

struct scsi_proto_specific_page {
	u_int8_t page_code;
#define	SPSP_PAGE_SAVABLE	0x80	/* Page is savable */
	u_int8_t page_length;
	u_int8_t protocol;
#define	SPSP_PROTO_FC		SCSI_PROTO_FC
#define	SPSP_PROTO_SPI		SCSI_PROTO_SPI
#define	SPSP_PROTO_SSA		SCSI_PROTO_SSA
#define	SPSP_PROTO_1394		SCSI_PROTO_1394
#define	SPSP_PROTO_RDMA		SCSI_PROTO_RDMA
#define	SPSP_PROTO_ISCSI	SCSI_PROTO_ISCSI
#define	SPSP_PROTO_SAS		SCSI_PROTO_SAS
#define	SPSP_PROTO_ADT		SCSI_PROTO_ADITP
#define	SPSP_PROTO_ATA		SCSI_PROTO_ATA
#define	SPSP_PROTO_UAS		SCSI_PROTO_UAS
#define	SPSP_PROTO_SOP		SCSI_PROTO_SOP
#define	SPSP_PROTO_NONE		SCSI_PROTO_NONE
};

struct scsi_reserve {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SR_EXTENT	0x01
#define	SR_ID_MASK	0x0e
#define	SR_3RDPTY	0x10
#define	SR_LUN_MASK	0xe0
	u_int8_t resv_id;
	u_int8_t length[2];
	u_int8_t control;
};

struct scsi_reserve_10 {
	uint8_t opcode;
	uint8_t byte2;
#define	SR10_3RDPTY	0x10
#define	SR10_LONGID	0x02
#define	SR10_EXTENT	0x01
	uint8_t resv_id;
	uint8_t thirdparty_id;
	uint8_t reserved[3];
	uint8_t length[2];
	uint8_t control;
};

struct scsi_release {
	u_int8_t opcode;
	u_int8_t byte2;
	u_int8_t resv_id;
	u_int8_t unused[1];
	u_int8_t length;
	u_int8_t control;
};

struct scsi_release_10 {
	uint8_t opcode;
	uint8_t byte2;
	uint8_t resv_id;
	uint8_t thirdparty_id;
	uint8_t reserved[3];
	uint8_t length[2];
	uint8_t control;
};

struct scsi_prevent {
	u_int8_t opcode;
	u_int8_t byte2;
	u_int8_t unused[2];
	u_int8_t how;
	u_int8_t control;
};
#define	PR_PREVENT	0x01
#define	PR_ALLOW	0x00

struct scsi_sync_cache {
	u_int8_t opcode;
	u_int8_t byte2;
#define	SSC_IMMED	0x02
#define	SSC_RELADR	0x01
	u_int8_t begin_lba[4];
	u_int8_t reserved;
	u_int8_t lb_count[2];
	u_int8_t control;
};

struct scsi_sync_cache_16 {
	uint8_t opcode;
	uint8_t byte2;
	uint8_t begin_lba[8];
	uint8_t lb_count[4];
	uint8_t reserved;
	uint8_t control;
};

struct scsi_format {
	uint8_t opcode;
	uint8_t byte2;
#define	SF_LONGLIST		0x20
#define	SF_FMTDATA		0x10
#define	SF_CMPLIST		0x08
#define	SF_FORMAT_MASK		0x07
#define	SF_FORMAT_BLOCK		0x00
#define	SF_FORMAT_LONG_BLOCK	0x03
#define	SF_FORMAT_BFI		0x04
#define	SF_FORMAT_PHYS		0x05
	uint8_t vendor;
	uint8_t interleave[2];
	uint8_t control;
};

struct scsi_format_header_short {
	uint8_t reserved;
#define	SF_DATA_FOV	0x80
#define	SF_DATA_DPRY	0x40
#define	SF_DATA_DCRT	0x20
#define	SF_DATA_STPF	0x10
#define	SF_DATA_IP	0x08
#define	SF_DATA_DSP	0x04
#define	SF_DATA_IMMED	0x02
#define	SF_DATA_VS	0x01
	uint8_t byte2;
	uint8_t defect_list_len[2];
};

struct scsi_format_header_long {
	uint8_t reserved;
	uint8_t byte2;
	uint8_t reserved2[2];
	uint8_t defect_list_len[4];
};

struct scsi_changedef {
	u_int8_t opcode;
	u_int8_t byte2;
	u_int8_t unused1;
	u_int8_t how;
	u_int8_t unused[4];
	u_int8_t datalen;
	u_int8_t control;
};

struct scsi_read_buffer {
	u_int8_t opcode;
	u_int8_t byte2;
#define	RWB_MODE		0x1F
#define	RWB_MODE_HDR_DATA	0x00
#define	RWB_MODE_VENDOR		0x01
#define	RWB_MODE_DATA		0x02
#define	RWB_MODE_DESCR		0x03
#define	RWB_MODE_DOWNLOAD	0x04
#define	RWB_MODE_DOWNLOAD_SAVE	0x05
#define	RWB_MODE_ECHO		0x0A
#define	RWB_MODE_ECHO_DESCR	0x0B
#define	RWB_MODE_ERROR_HISTORY	0x1C
	u_int8_t buffer_id;
	u_int8_t offset[3];
	u_int8_t length[3];
	u_int8_t control;
};

struct scsi_read_buffer_16 {
	uint8_t opcode;
	uint8_t byte2;
	uint8_t offset[8];
	uint8_t length[4];
	uint8_t buffer_id;
	uint8_t control;
};

struct
scsi_write_buffer { u_int8_t opcode; u_int8_t byte2; u_int8_t buffer_id; u_int8_t offset[3]; u_int8_t length[3]; u_int8_t control; }; struct scsi_read_attribute { u_int8_t opcode; u_int8_t service_action; #define SRA_SA_ATTR_VALUES 0x00 #define SRA_SA_ATTR_LIST 0x01 #define SRA_SA_LOG_VOL_LIST 0x02 #define SRA_SA_PART_LIST 0x03 #define SRA_SA_RESTRICTED 0x04 #define SRA_SA_SUPPORTED_ATTRS 0x05 #define SRA_SA_MASK 0x1f u_int8_t element[2]; u_int8_t elem_type; u_int8_t logical_volume; u_int8_t reserved1; u_int8_t partition; u_int8_t first_attribute[2]; u_int8_t length[4]; u_int8_t cache; #define SRA_CACHE 0x01 u_int8_t control; }; struct scsi_write_attribute { u_int8_t opcode; u_int8_t byte2; #define SWA_WTC 0x01 u_int8_t element[3]; u_int8_t logical_volume; u_int8_t reserved1; u_int8_t partition; u_int8_t reserved2[2]; u_int8_t length[4]; u_int8_t reserved3; u_int8_t control; }; struct scsi_read_attribute_values { u_int8_t length[4]; u_int8_t attribute_0[0]; }; struct scsi_mam_attribute_header { u_int8_t id[2]; /* * Attributes obtained from SPC-4r36g (section 7.4.2.2) and * SSC-4r03 (section 4.2.21). */ #define SMA_ATTR_ID_DEVICE_MIN 0x0000 #define SMA_ATTR_REM_CAP_PARTITION 0x0000 #define SMA_ATTR_MAX_CAP_PARTITION 0x0001 #define SMA_ATTR_TAPEALERT_FLAGS 0x0002 #define SMA_ATTR_LOAD_COUNT 0x0003 #define SMA_ATTR_MAM_SPACE_REMAINING 0x0004 #define SMA_ATTR_DEV_ASSIGNING_ORG 0x0005 #define SMA_ATTR_FORMAT_DENSITY_CODE 0x0006 #define SMA_ATTR_INITIALIZATION_COUNT 0x0007 #define SMA_ATTR_VOLUME_ID 0x0008 #define SMA_ATTR_VOLUME_CHANGE_REF 0x0009 #define SMA_ATTR_DEV_SERIAL_LAST_LOAD 0x020a #define SMA_ATTR_DEV_SERIAL_LAST_LOAD_1 0x020b #define SMA_ATTR_DEV_SERIAL_LAST_LOAD_2 0x020c #define SMA_ATTR_DEV_SERIAL_LAST_LOAD_3 0x020d #define SMA_ATTR_TOTAL_MB_WRITTEN_LT 0x0220 #define SMA_ATTR_TOTAL_MB_READ_LT 0x0221 #define SMA_ATTR_TOTAL_MB_WRITTEN_CUR 0x0222 #define SMA_ATTR_TOTAL_MB_READ_CUR 0x0223 #define SMA_ATTR_FIRST_ENC_BLOCK 0x0224 #define SMA_ATTR_NEXT_UNENC_BLOCK 0x0225 #define SMA_ATTR_MEDIUM_USAGE_HIST 0x0340 #define SMA_ATTR_PART_USAGE_HIST 0x0341 #define SMA_ATTR_ID_DEVICE_MAX 0x03ff #define SMA_ATTR_ID_MEDIUM_MIN 0x0400 #define SMA_ATTR_MED_MANUF 0x0400 #define SMA_ATTR_MED_SERIAL 0x0401 #define SMA_ATTR_MED_LENGTH 0x0402 #define SMA_ATTR_MED_WIDTH 0x0403 #define SMA_ATTR_MED_ASSIGNING_ORG 0x0404 #define SMA_ATTR_MED_DENSITY_CODE 0x0405 #define SMA_ATTR_MED_MANUF_DATE 0x0406 #define SMA_ATTR_MAM_CAPACITY 0x0407 #define SMA_ATTR_MED_TYPE 0x0408 #define SMA_ATTR_MED_TYPE_INFO 0x0409 #define SMA_ATTR_MED_SERIAL_NUM 0x040a #define SMA_ATTR_ID_MEDIUM_MAX 0x07ff #define SMA_ATTR_ID_HOST_MIN 0x0800 #define SMA_ATTR_APP_VENDOR 0x0800 #define SMA_ATTR_APP_NAME 0x0801 #define SMA_ATTR_APP_VERSION 0x0802 #define SMA_ATTR_USER_MED_TEXT_LABEL 0x0803 #define SMA_ATTR_LAST_WRITTEN_TIME 0x0804 #define SMA_ATTR_TEXT_LOCAL_ID 0x0805 #define SMA_ATTR_BARCODE 0x0806 #define SMA_ATTR_HOST_OWNER_NAME 0x0807 #define SMA_ATTR_MEDIA_POOL 0x0808 #define SMA_ATTR_PART_USER_LABEL 0x0809 #define SMA_ATTR_LOAD_UNLOAD_AT_PART 0x080a #define SMA_ATTR_APP_FORMAT_VERSION 0x080b #define SMA_ATTR_VOL_COHERENCY_INFO 0x080c #define SMA_ATTR_ID_HOST_MAX 0x0bff #define SMA_ATTR_VENDOR_DEVICE_MIN 0x0c00 #define SMA_ATTR_VENDOR_DEVICE_MAX 0x0fff #define SMA_ATTR_VENDOR_MEDIUM_MIN 0x1000 #define SMA_ATTR_VENDOR_MEDIUM_MAX 0x13ff #define SMA_ATTR_VENDOR_HOST_MIN 0x1400 #define SMA_ATTR_VENDOR_HOST_MAX 0x17ff u_int8_t byte2; #define SMA_FORMAT_BINARY 0x00 #define SMA_FORMAT_ASCII 0x01 #define SMA_FORMAT_TEXT 0x02 #define 
SMA_FORMAT_MASK 0x03 #define SMA_READ_ONLY 0x80 u_int8_t length[2]; u_int8_t attribute[0]; }; struct scsi_attrib_list_header { u_int8_t length[4]; u_int8_t first_attr_0[0]; }; struct scsi_attrib_lv_list { u_int8_t length[2]; u_int8_t first_lv_number; u_int8_t num_logical_volumes; }; struct scsi_attrib_vendser { uint8_t vendor[8]; uint8_t serial_num[32]; }; /* * These values are used to decode the Volume Coherency Information * Attribute (0x080c) for LTFS-format coherency information. * Although the Application Client Specific lengths are different for * Version 0 and Version 1, the data is in fact the same. The length * difference was due to a code bug. */ #define SCSI_LTFS_VER0_LEN 42 #define SCSI_LTFS_VER1_LEN 43 #define SCSI_LTFS_UUID_LEN 36 #define SCSI_LTFS_STR_NAME "LTFS" #define SCSI_LTFS_STR_LEN 4 typedef enum { SCSI_ATTR_FLAG_NONE = 0x00, SCSI_ATTR_FLAG_HEX = 0x01, SCSI_ATTR_FLAG_FP = 0x02, SCSI_ATTR_FLAG_DIV_10 = 0x04, SCSI_ATTR_FLAG_FP_1DIGIT = 0x08 } scsi_attrib_flags; typedef enum { SCSI_ATTR_OUTPUT_NONE = 0x00, SCSI_ATTR_OUTPUT_TEXT_MASK = 0x03, SCSI_ATTR_OUTPUT_TEXT_RAW = 0x00, SCSI_ATTR_OUTPUT_TEXT_ESC = 0x01, SCSI_ATTR_OUTPUT_TEXT_RSV1 = 0x02, SCSI_ATTR_OUTPUT_TEXT_RSV2 = 0x03, SCSI_ATTR_OUTPUT_NONASCII_MASK = 0x0c, SCSI_ATTR_OUTPUT_NONASCII_TRIM = 0x00, SCSI_ATTR_OUTPUT_NONASCII_ESC = 0x04, SCSI_ATTR_OUTPUT_NONASCII_RAW = 0x08, SCSI_ATTR_OUTPUT_NONASCII_RSV1 = 0x0c, SCSI_ATTR_OUTPUT_FIELD_MASK = 0xf0, SCSI_ATTR_OUTPUT_FIELD_ALL = 0xf0, SCSI_ATTR_OUTPUT_FIELD_NONE = 0x00, SCSI_ATTR_OUTPUT_FIELD_DESC = 0x10, SCSI_ATTR_OUTPUT_FIELD_NUM = 0x20, SCSI_ATTR_OUTPUT_FIELD_SIZE = 0x40, SCSI_ATTR_OUTPUT_FIELD_RW = 0x80 } scsi_attrib_output_flags; struct sbuf; struct scsi_attrib_table_entry { u_int32_t id; u_int32_t flags; const char *desc; const char *suffix; int (*to_str)(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int (*parse_str)(char *str, struct scsi_mam_attribute_header *hdr, uint32_t alloc_len, uint32_t flags, char *error_str, int error_str_len); }; struct scsi_rw_6 { u_int8_t opcode; u_int8_t addr[3]; /* only 5 bits are valid in the MSB address byte */ #define SRW_TOPADDR 0x1F u_int8_t length; u_int8_t control; }; struct scsi_rw_10 { u_int8_t opcode; #define SRW10_RELADDR 0x01 /* EBP defined for WRITE(10) only */ #define SRW10_EBP 0x04 #define SRW10_FUA 0x08 #define SRW10_DPO 0x10 u_int8_t byte2; u_int8_t addr[4]; u_int8_t reserved; u_int8_t length[2]; u_int8_t control; }; struct scsi_rw_12 { u_int8_t opcode; #define SRW12_RELADDR 0x01 #define SRW12_FUA 0x08 #define SRW12_DPO 0x10 u_int8_t byte2; u_int8_t addr[4]; u_int8_t length[4]; u_int8_t reserved; u_int8_t control; }; struct scsi_rw_16 { u_int8_t opcode; #define SRW16_RELADDR 0x01 #define SRW16_FUA 0x08 #define SRW16_DPO 0x10 u_int8_t byte2; u_int8_t addr[8]; u_int8_t length[4]; u_int8_t reserved; u_int8_t control; }; struct scsi_write_atomic_16 { uint8_t opcode; uint8_t byte2; uint8_t addr[8]; uint8_t boundary[2]; uint8_t length[2]; uint8_t group; uint8_t control; }; struct scsi_write_same_10 { uint8_t opcode; uint8_t byte2; #define SWS_LBDATA 0x02 #define SWS_PBDATA 0x04 #define SWS_UNMAP 0x08 #define SWS_ANCHOR 0x10 uint8_t addr[4]; uint8_t group; uint8_t length[2]; uint8_t control; }; struct scsi_write_same_16 { uint8_t opcode; uint8_t byte2; #define SWS_NDOB 0x01 uint8_t addr[8]; uint8_t length[4]; uint8_t group; uint8_t control; }; struct scsi_unmap { uint8_t opcode; uint8_t byte2; #define SU_ANCHOR 0x01 
uint8_t reserved[4]; uint8_t group; uint8_t length[2]; uint8_t control; }; struct scsi_unmap_header { uint8_t length[2]; uint8_t desc_length[2]; uint8_t reserved[4]; }; struct scsi_unmap_desc { uint8_t lba[8]; uint8_t length[4]; uint8_t reserved[4]; }; struct scsi_write_verify_10 { uint8_t opcode; uint8_t byte2; #define SWV_BYTCHK 0x02 #define SWV_DPO 0x10 #define SWV_WRPROECT_MASK 0xe0 uint8_t addr[4]; uint8_t group; uint8_t length[2]; uint8_t control; }; struct scsi_write_verify_12 { uint8_t opcode; uint8_t byte2; uint8_t addr[4]; uint8_t length[4]; uint8_t group; uint8_t control; }; struct scsi_write_verify_16 { uint8_t opcode; uint8_t byte2; uint8_t addr[8]; uint8_t length[4]; uint8_t group; uint8_t control; }; struct scsi_start_stop_unit { u_int8_t opcode; u_int8_t byte2; #define SSS_IMMED 0x01 u_int8_t reserved[2]; u_int8_t how; #define SSS_START 0x01 #define SSS_LOEJ 0x02 #define SSS_PC_MASK 0xf0 #define SSS_PC_START_VALID 0x00 #define SSS_PC_ACTIVE 0x10 #define SSS_PC_IDLE 0x20 #define SSS_PC_STANDBY 0x30 #define SSS_PC_LU_CONTROL 0x70 #define SSS_PC_FORCE_IDLE_0 0xa0 #define SSS_PC_FORCE_STANDBY_0 0xb0 u_int8_t control; }; struct ata_pass_12 { u_int8_t opcode; u_int8_t protocol; #define AP_PROTO_HARD_RESET (0x00 << 1) #define AP_PROTO_SRST (0x01 << 1) #define AP_PROTO_NON_DATA (0x03 << 1) #define AP_PROTO_PIO_IN (0x04 << 1) #define AP_PROTO_PIO_OUT (0x05 << 1) #define AP_PROTO_DMA (0x06 << 1) #define AP_PROTO_DMA_QUEUED (0x07 << 1) #define AP_PROTO_DEVICE_DIAG (0x08 << 1) #define AP_PROTO_DEVICE_RESET (0x09 << 1) #define AP_PROTO_UDMA_IN (0x0a << 1) #define AP_PROTO_UDMA_OUT (0x0b << 1) #define AP_PROTO_FPDMA (0x0c << 1) #define AP_PROTO_RESP_INFO (0x0f << 1) #define AP_MULTI 0xe0 u_int8_t flags; #define AP_T_LEN 0x03 #define AP_BB 0x04 #define AP_T_DIR 0x08 #define AP_CK_COND 0x20 #define AP_OFFLINE 0x60 u_int8_t features; u_int8_t sector_count; u_int8_t lba_low; u_int8_t lba_mid; u_int8_t lba_high; u_int8_t device; u_int8_t command; u_int8_t reserved; u_int8_t control; }; struct scsi_maintenance_in { uint8_t opcode; uint8_t byte2; #define SERVICE_ACTION_MASK 0x1f #define SA_RPRT_TRGT_GRP 0x0a uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_supported_opcodes { uint8_t opcode; uint8_t service_action; uint8_t options; #define RSO_RCTD 0x80 #define RSO_OPTIONS_MASK 0x07 #define RSO_OPTIONS_ALL 0x00 #define RSO_OPTIONS_OC 0x01 #define RSO_OPTIONS_OC_SA 0x02 uint8_t requested_opcode; uint8_t requested_service_action[2]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_supported_opcodes_timeout { uint8_t length[2]; uint8_t reserved; uint8_t cmd_specific; uint8_t nominal_time[4]; uint8_t recommended_time[4]; }; struct scsi_report_supported_opcodes_descr { uint8_t opcode; uint8_t reserved; uint8_t service_action[2]; uint8_t reserved2; uint8_t flags; #define RSO_SERVACTV 0x01 #define RSO_CTDP 0x02 uint8_t cdb_length[2]; struct scsi_report_supported_opcodes_timeout timeout[0]; }; struct scsi_report_supported_opcodes_all { uint8_t length[4]; struct scsi_report_supported_opcodes_descr descr[0]; }; struct scsi_report_supported_opcodes_one { uint8_t reserved; uint8_t support; #define RSO_ONE_CTDP 0x80 #define RSO_ONE_SUP_MASK 0x07 #define RSO_ONE_SUP_UNAVAIL 0x00 #define RSO_ONE_SUP_NOT_SUP 0x01 #define RSO_ONE_SUP_AVAIL 0x03 #define RSO_ONE_SUP_VENDOR 0x05 uint8_t cdb_length[2]; uint8_t cdb_usage[]; }; struct scsi_report_supported_tmf { uint8_t opcode; uint8_t service_action; uint8_t reserved[4]; uint8_t length[4]; 
uint8_t reserved1; uint8_t control; }; struct scsi_report_supported_tmf_data { uint8_t byte1; #define RST_WAKES 0x01 #define RST_TRS 0x02 #define RST_QTS 0x04 #define RST_LURS 0x08 #define RST_CTSS 0x10 #define RST_CACAS 0x20 #define RST_ATSS 0x40 #define RST_ATS 0x80 uint8_t byte2; #define RST_ITNRS 0x01 #define RST_QTSS 0x02 #define RST_QAES 0x04 uint8_t reserved[2]; }; struct scsi_report_timestamp { uint8_t opcode; uint8_t service_action; uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_timestamp_data { uint8_t length[2]; uint8_t origin; #define RTS_ORIG_MASK 0x00 #define RTS_ORIG_ZERO 0x00 #define RTS_ORIG_SET 0x02 #define RTS_ORIG_OUTSIDE 0x03 uint8_t reserved; uint8_t timestamp[6]; uint8_t reserve2[2]; }; struct scsi_receive_copy_status_lid1 { uint8_t opcode; uint8_t service_action; #define RCS_RCS_LID1 0x00 uint8_t list_identifier; uint8_t reserved[7]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_status_lid1_data { uint8_t available_data[4]; uint8_t copy_command_status; #define RCS_CCS_INPROG 0x00 #define RCS_CCS_COMPLETED 0x01 #define RCS_CCS_ERROR 0x02 uint8_t segments_processed[2]; uint8_t transfer_count_units; #define RCS_TC_BYTES 0x00 #define RCS_TC_KBYTES 0x01 #define RCS_TC_MBYTES 0x02 #define RCS_TC_GBYTES 0x03 #define RCS_TC_TBYTES 0x04 #define RCS_TC_PBYTES 0x05 #define RCS_TC_EBYTES 0x06 #define RCS_TC_LBAS 0xf1 uint8_t transfer_count[4]; }; struct scsi_receive_copy_failure_details { uint8_t opcode; uint8_t service_action; #define RCS_RCFD 0x04 uint8_t list_identifier; uint8_t reserved[7]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_failure_details_data { uint8_t available_data[4]; uint8_t reserved[52]; uint8_t copy_command_status; uint8_t reserved2; uint8_t sense_data_length[2]; uint8_t sense_data[]; }; struct scsi_receive_copy_status_lid4 { uint8_t opcode; uint8_t service_action; #define RCS_RCS_LID4 0x05 uint8_t list_identifier[4]; uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_status_lid4_data { uint8_t available_data[4]; uint8_t response_to_service_action; uint8_t copy_command_status; #define RCS_CCS_COMPLETED_PROD 0x03 #define RCS_CCS_COMPLETED_RESID 0x04 #define RCS_CCS_INPROG_FGBG 0x10 #define RCS_CCS_INPROG_FG 0x11 #define RCS_CCS_INPROG_BG 0x12 #define RCS_CCS_ABORTED 0x60 uint8_t operation_counter[2]; uint8_t estimated_status_update_delay[4]; uint8_t extended_copy_completion_status; uint8_t length_of_the_sense_data_field; uint8_t sense_data_length; uint8_t transfer_count_units; uint8_t transfer_count[8]; uint8_t segments_processed[2]; uint8_t reserved[6]; uint8_t sense_data[]; }; struct scsi_receive_copy_operating_parameters { uint8_t opcode; uint8_t service_action; #define RCS_RCOP 0x03 uint8_t reserved[8]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_operating_parameters_data { uint8_t length[4]; uint8_t snlid; #define RCOP_SNLID 0x01 uint8_t reserved[3]; uint8_t maximum_cscd_descriptor_count[2]; uint8_t maximum_segment_descriptor_count[2]; uint8_t maximum_descriptor_list_length[4]; uint8_t maximum_segment_length[4]; uint8_t maximum_inline_data_length[4]; uint8_t held_data_limit[4]; uint8_t maximum_stream_device_transfer_size[4]; uint8_t reserved2[2]; uint8_t total_concurrent_copies[2]; uint8_t maximum_concurrent_copies; uint8_t data_segment_granularity; uint8_t inline_data_granularity; uint8_t held_data_granularity; uint8_t reserved3[3]; 
uint8_t implemented_descriptor_list_length; uint8_t list_of_implemented_descriptor_type_codes[0]; }; struct scsi_extended_copy { uint8_t opcode; uint8_t service_action; #define EC_EC_LID1 0x00 #define EC_EC_LID4 0x01 uint8_t reserved[8]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_ec_cscd_dtsp { uint8_t flags; #define EC_CSCD_FIXED 0x01 #define EC_CSCD_PAD 0x04 uint8_t block_length[3]; }; struct scsi_ec_cscd { uint8_t type_code; #define EC_CSCD_EXT 0xff uint8_t luidt_pdt; #define EC_NUL 0x20 #define EC_LUIDT_MASK 0xc0 #define EC_LUIDT_LUN 0x00 #define EC_LUIDT_PROXY_TOKEN 0x40 uint8_t relative_initiator_port[2]; uint8_t cscd_params[24]; struct scsi_ec_cscd_dtsp dtsp; }; struct scsi_ec_cscd_id { uint8_t type_code; #define EC_CSCD_ID 0xe4 uint8_t luidt_pdt; uint8_t relative_initiator_port[2]; uint8_t codeset; uint8_t id_type; uint8_t reserved; uint8_t length; uint8_t designator[20]; struct scsi_ec_cscd_dtsp dtsp; }; struct scsi_ec_segment { uint8_t type_code; uint8_t flags; #define EC_SEG_DC 0x02 #define EC_SEG_CAT 0x01 uint8_t descr_length[2]; uint8_t params[]; }; struct scsi_ec_segment_b2b { uint8_t type_code; #define EC_SEG_B2B 0x02 uint8_t flags; uint8_t descr_length[2]; uint8_t src_cscd[2]; uint8_t dst_cscd[2]; uint8_t reserved[2]; uint8_t number_of_blocks[2]; uint8_t src_lba[8]; uint8_t dst_lba[8]; }; struct scsi_ec_segment_verify { uint8_t type_code; #define EC_SEG_VERIFY 0x07 uint8_t reserved; uint8_t descr_length[2]; uint8_t src_cscd[2]; uint8_t reserved2[2]; uint8_t tur; uint8_t reserved3[3]; }; struct scsi_ec_segment_register_key { uint8_t type_code; #define EC_SEG_REGISTER_KEY 0x14 uint8_t reserved; uint8_t descr_length[2]; uint8_t reserved2[2]; uint8_t dst_cscd[2]; uint8_t res_key[8]; uint8_t sa_res_key[8]; uint8_t reserved3[4]; }; struct scsi_extended_copy_lid1_data { uint8_t list_identifier; uint8_t flags; #define EC_PRIORITY 0x07 #define EC_LIST_ID_USAGE_MASK 0x18 #define EC_LIST_ID_USAGE_FULL 0x08 #define EC_LIST_ID_USAGE_NOHOLD 0x10 #define EC_LIST_ID_USAGE_NONE 0x18 #define EC_STR 0x20 uint8_t cscd_list_length[2]; uint8_t reserved[4]; uint8_t segment_list_length[4]; uint8_t inline_data_length[4]; uint8_t data[]; }; struct scsi_extended_copy_lid4_data { uint8_t list_format; #define EC_LIST_FORMAT 0x01 uint8_t flags; uint8_t header_cscd_list_length[2]; uint8_t reserved[11]; uint8_t flags2; #define EC_IMMED 0x01 #define EC_G_SENSE 0x02 uint8_t header_cscd_type_code; uint8_t reserved2[3]; uint8_t list_identifier[4]; uint8_t reserved3[18]; uint8_t cscd_list_length[2]; uint8_t segment_list_length[2]; uint8_t inline_data_length[2]; uint8_t data[]; }; struct scsi_copy_operation_abort { uint8_t opcode; uint8_t service_action; #define EC_COA 0x1c uint8_t list_identifier[4]; uint8_t reserved[9]; uint8_t control; }; struct scsi_populate_token { uint8_t opcode; uint8_t service_action; #define EC_PT 0x10 uint8_t reserved[4]; uint8_t list_identifier[4]; uint8_t length[4]; uint8_t group_number; uint8_t control; }; struct scsi_range_desc { uint8_t lba[8]; uint8_t length[4]; uint8_t reserved[4]; }; struct scsi_populate_token_data { uint8_t length[2]; uint8_t flags; #define EC_PT_IMMED 0x01 #define EC_PT_RTV 0x02 uint8_t reserved; uint8_t inactivity_timeout[4]; uint8_t rod_type[4]; uint8_t reserved2[2]; uint8_t range_descriptor_length[2]; struct scsi_range_desc desc[]; }; struct scsi_write_using_token { uint8_t opcode; uint8_t service_action; #define EC_WUT 0x11 uint8_t reserved[4]; uint8_t list_identifier[4]; uint8_t length[4]; uint8_t group_number; uint8_t 
control; }; struct scsi_write_using_token_data { uint8_t length[2]; uint8_t flags; #define EC_WUT_IMMED 0x01 #define EC_WUT_DEL_TKN 0x02 uint8_t reserved[5]; uint8_t offset_into_rod[8]; uint8_t rod_token[512]; uint8_t reserved2[6]; uint8_t range_descriptor_length[2]; struct scsi_range_desc desc[]; }; struct scsi_receive_rod_token_information { uint8_t opcode; uint8_t service_action; #define RCS_RRTI 0x07 uint8_t list_identifier[4]; uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_token { uint8_t type[4]; #define ROD_TYPE_INTERNAL 0x00000000 #define ROD_TYPE_AUR 0x00010000 #define ROD_TYPE_PIT_DEF 0x00800000 #define ROD_TYPE_PIT_VULN 0x00800001 #define ROD_TYPE_PIT_PERS 0x00800002 #define ROD_TYPE_PIT_ANY 0x0080FFFF #define ROD_TYPE_BLOCK_ZERO 0xFFFF0001 uint8_t reserved[2]; uint8_t length[2]; uint8_t body[0]; }; struct scsi_report_all_rod_tokens { uint8_t opcode; uint8_t service_action; #define RCS_RART 0x08 uint8_t reserved[8]; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_report_all_rod_tokens_data { uint8_t available_data[4]; uint8_t reserved[4]; uint8_t rod_management_token_list[]; }; struct ata_pass_16 { u_int8_t opcode; u_int8_t protocol; #define AP_EXTEND 0x01 u_int8_t flags; #define AP_FLAG_TLEN_NO_DATA (0 << 0) #define AP_FLAG_TLEN_FEAT (1 << 0) #define AP_FLAG_TLEN_SECT_CNT (2 << 0) #define AP_FLAG_TLEN_STPSIU (3 << 0) #define AP_FLAG_BYT_BLOK_BYTES (0 << 2) #define AP_FLAG_BYT_BLOK_BLOCKS (1 << 2) #define AP_FLAG_TDIR_TO_DEV (0 << 3) #define AP_FLAG_TDIR_FROM_DEV (1 << 3) #define AP_FLAG_CHK_COND (1 << 5) u_int8_t features_ext; u_int8_t features; u_int8_t sector_count_ext; u_int8_t sector_count; u_int8_t lba_low_ext; u_int8_t lba_low; u_int8_t lba_mid_ext; u_int8_t lba_mid; u_int8_t lba_high_ext; u_int8_t lba_high; u_int8_t device; u_int8_t command; u_int8_t control; }; #define SC_SCSI_1 0x01 #define SC_SCSI_2 0x03 /* * Opcodes */ #define TEST_UNIT_READY 0x00 #define REQUEST_SENSE 0x03 #define READ_6 0x08 #define WRITE_6 0x0A #define INQUIRY 0x12 #define MODE_SELECT_6 0x15 #define MODE_SENSE_6 0x1A #define START_STOP_UNIT 0x1B #define START_STOP 0x1B #define RESERVE 0x16 #define RELEASE 0x17 #define RECEIVE_DIAGNOSTIC 0x1C #define SEND_DIAGNOSTIC 0x1D #define PREVENT_ALLOW 0x1E #define READ_CAPACITY 0x25 #define READ_10 0x28 #define WRITE_10 0x2A #define POSITION_TO_ELEMENT 0x2B #define WRITE_VERIFY_10 0x2E #define VERIFY_10 0x2F #define SYNCHRONIZE_CACHE 0x35 #define READ_DEFECT_DATA_10 0x37 #define WRITE_BUFFER 0x3B #define READ_BUFFER 0x3C #define CHANGE_DEFINITION 0x40 #define WRITE_SAME_10 0x41 #define UNMAP 0x42 #define LOG_SELECT 0x4C #define LOG_SENSE 0x4D #define MODE_SELECT_10 0x55 #define RESERVE_10 0x56 #define RELEASE_10 0x57 #define MODE_SENSE_10 0x5A #define PERSISTENT_RES_IN 0x5E #define PERSISTENT_RES_OUT 0x5F #define EXTENDED_COPY 0x83 #define RECEIVE_COPY_STATUS 0x84 #define ATA_PASS_16 0x85 #define READ_16 0x88 #define COMPARE_AND_WRITE 0x89 #define WRITE_16 0x8A #define READ_ATTRIBUTE 0x8C #define WRITE_ATTRIBUTE 0x8D #define WRITE_VERIFY_16 0x8E #define VERIFY_16 0x8F #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 #define READ_BUFFER_16 0x9B #define WRITE_ATOMIC_16 0x9C #define SERVICE_ACTION_IN 0x9E #define REPORT_LUNS 0xA0 #define ATA_PASS_12 0xA1 #define SECURITY_PROTOCOL_IN 0xA2 #define MAINTENANCE_IN 0xA3 #define MAINTENANCE_OUT 0xA4 #define MOVE_MEDIUM 0xA5 #define READ_12 0xA8 #define WRITE_12 0xAA #define WRITE_VERIFY_12 0xAE #define VERIFY_12 0xAF #define 
SECURITY_PROTOCOL_OUT 0xB5 #define READ_ELEMENT_STATUS 0xB8 #define READ_CD 0xBE /* Maintenance In Service Action Codes */ #define REPORT_IDENTIFYING_INFORMATION 0x05 #define REPORT_TARGET_PORT_GROUPS 0x0A #define REPORT_ALIASES 0x0B #define REPORT_SUPPORTED_OPERATION_CODES 0x0C #define REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0D #define REPORT_PRIORITY 0x0E #define REPORT_TIMESTAMP 0x0F #define MANAGEMENT_PROTOCOL_IN 0x10 /* Maintenance Out Service Action Codes */ #define SET_IDENTIFYING_INFORMATION 0x06 #define SET_TARGET_PORT_GROUPS 0x0A #define CHANGE_ALIASES 0x0B #define SET_PRIORITY 0x0E #define SET_TIMESTAMP 0x0F #define MANAGEMENT_PROTOCOL_OUT 0x10 /* * Device Types */ #define T_DIRECT 0x00 #define T_SEQUENTIAL 0x01 #define T_PRINTER 0x02 #define T_PROCESSOR 0x03 #define T_WORM 0x04 #define T_CDROM 0x05 #define T_SCANNER 0x06 #define T_OPTICAL 0x07 #define T_CHANGER 0x08 #define T_COMM 0x09 #define T_ASC0 0x0a #define T_ASC1 0x0b #define T_STORARRAY 0x0c #define T_ENCLOSURE 0x0d #define T_RBC 0x0e #define T_OCRW 0x0f #define T_OSD 0x11 #define T_ADC 0x12 #define T_NODEVICE 0x1f #define T_ANY 0xff /* Used in Quirk table matches */ #define T_REMOV 1 #define T_FIXED 0 /* * This length is the initial inquiry length used by the probe code, as * well as the length necessary for scsi_print_inquiry() to function * correctly. If either use requires a different length in the future, * the two values should be de-coupled. */ #define SHORT_INQUIRY_LENGTH 36 struct scsi_inquiry_data { u_int8_t device; #define SID_TYPE(inq_data) ((inq_data)->device & 0x1f) #define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5) #define SID_QUAL_LU_CONNECTED 0x00 /* * The specified peripheral device * type is currently connected to * logical unit. If the target cannot * determine whether or not a physical * device is currently connected, it * shall also use this peripheral * qualifier when returning the INQUIRY * data. This peripheral qualifier * does not mean that the device is * ready for access by the initiator. */ #define SID_QUAL_LU_OFFLINE 0x01 /* * The target is capable of supporting * the specified peripheral device type * on this logical unit; however, the * physical device is not currently * connected to this logical unit. */ #define SID_QUAL_RSVD 0x02 #define SID_QUAL_BAD_LU 0x03 /* * The target is not capable of * supporting a physical device on * this logical unit. For this * peripheral qualifier the peripheral * device type shall be set to 1Fh to * provide compatibility with previous * versions of SCSI. All other * peripheral device type values are * reserved for this peripheral * qualifier.
*/ #define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x04) != 0) u_int8_t dev_qual2; #define SID_QUAL2 0x7F #define SID_LU_CONG 0x40 #define SID_RMB 0x80 #define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & SID_RMB) != 0) u_int8_t version; #define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07) #define SCSI_REV_0 0 #define SCSI_REV_CCS 1 #define SCSI_REV_2 2 #define SCSI_REV_SPC 3 #define SCSI_REV_SPC2 4 #define SCSI_REV_SPC3 5 #define SCSI_REV_SPC4 6 #define SID_ECMA 0x38 #define SID_ISO 0xC0 u_int8_t response_format; #define SID_AENC 0x80 #define SID_TrmIOP 0x40 #define SID_NormACA 0x20 #define SID_HiSup 0x10 u_int8_t additional_length; #define SID_ADDITIONAL_LENGTH(iqd) \ ((iqd)->additional_length + \ __offsetof(struct scsi_inquiry_data, additional_length) + 1) u_int8_t spc3_flags; #define SPC3_SID_PROTECT 0x01 #define SPC3_SID_3PC 0x08 #define SPC3_SID_TPGS_MASK 0x30 #define SPC3_SID_TPGS_IMPLICIT 0x10 #define SPC3_SID_TPGS_EXPLICIT 0x20 #define SPC3_SID_ACC 0x40 #define SPC3_SID_SCCS 0x80 u_int8_t spc2_flags; #define SPC2_SID_ADDR16 0x01 #define SPC2_SID_MChngr 0x08 #define SPC2_SID_MultiP 0x10 #define SPC2_SID_EncServ 0x40 #define SPC2_SID_BQueue 0x80 #define INQ_DATA_TQ_ENABLED(iqd) \ ((SID_ANSI_REV(iqd) < SCSI_REV_SPC2)? ((iqd)->flags & SID_CmdQue) : \ (((iqd)->flags & SID_CmdQue) && !((iqd)->spc2_flags & SPC2_SID_BQueue)) || \ (!((iqd)->flags & SID_CmdQue) && ((iqd)->spc2_flags & SPC2_SID_BQueue))) u_int8_t flags; #define SID_SftRe 0x01 #define SID_CmdQue 0x02 #define SID_Linked 0x08 #define SID_Sync 0x10 #define SID_WBus16 0x20 #define SID_WBus32 0x40 #define SID_RelAdr 0x80 #define SID_VENDOR_SIZE 8 char vendor[SID_VENDOR_SIZE]; #define SID_PRODUCT_SIZE 16 char product[SID_PRODUCT_SIZE]; #define SID_REVISION_SIZE 4 char revision[SID_REVISION_SIZE]; /* * The following fields were taken from SCSI Primary Commands - 2 * (SPC-2) Revision 14, Dated 11 November 1999 */ #define SID_VENDOR_SPECIFIC_0_SIZE 20 u_int8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE]; /* * An extension of SCSI Parallel Specific Values */ #define SID_SPI_IUS 0x01 #define SID_SPI_QAS 0x02 #define SID_SPI_CLOCK_ST 0x00 #define SID_SPI_CLOCK_DT 0x04 #define SID_SPI_CLOCK_DT_ST 0x0C #define SID_SPI_MASK 0x0F u_int8_t spi3data; u_int8_t reserved2; /* * Version Descriptors, stored as 2-byte values. */ u_int8_t version1[2]; u_int8_t version2[2]; u_int8_t version3[2]; u_int8_t version4[2]; u_int8_t version5[2]; u_int8_t version6[2]; u_int8_t version7[2]; u_int8_t version8[2]; u_int8_t reserved3[22]; #define SID_VENDOR_SPECIFIC_1_SIZE 160 u_int8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE]; }; /* * This structure is more suited to initiator operation, because the * maximum number of supported pages is already allocated. */ struct scsi_vpd_supported_page_list { u_int8_t device; u_int8_t page_code; #define SVPD_SUPPORTED_PAGE_LIST 0x00 #define SVPD_SUPPORTED_PAGES_HDR_LEN 4 u_int8_t reserved; u_int8_t length; /* number of VPD entries */ #define SVPD_SUPPORTED_PAGES_SIZE 251 u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE]; };
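/*
 * Editor's note: an illustrative sketch added for this review, not part of
 * the original header.  It applies the SID_* accessors above to a freshly
 * returned INQUIRY buffer; the helper name is hypothetical.
 */
static __inline int
scsi_inquiry_is_connected_disk(const struct scsi_inquiry_data *inq_data)
{

	/* Peripheral qualifier 0: the logical unit is connected. */
	if (SID_QUAL(inq_data) != SID_QUAL_LU_CONNECTED)
		return (0);
	/* Peripheral device type 0: direct access block device. */
	return (SID_TYPE(inq_data) == T_DIRECT);
}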
/* * This structure is more suited to target operation, because the * number of supported pages is left to the user to allocate. */ struct scsi_vpd_supported_pages { u_int8_t device; u_int8_t page_code; u_int8_t reserved; #define SVPD_SUPPORTED_PAGES 0x00 u_int8_t length; u_int8_t page_list[0]; }; struct scsi_vpd_unit_serial_number { u_int8_t device; u_int8_t page_code; #define SVPD_UNIT_SERIAL_NUMBER 0x80 u_int8_t reserved; u_int8_t length; /* serial number length */ #define SVPD_SERIAL_NUM_SIZE 251 u_int8_t serial_num[SVPD_SERIAL_NUM_SIZE]; }; struct scsi_vpd_device_id { u_int8_t device; u_int8_t page_code; #define SVPD_DEVICE_ID 0x83 #define SVPD_DEVICE_ID_MAX_SIZE 252 #define SVPD_DEVICE_ID_HDR_LEN \ __offsetof(struct scsi_vpd_device_id, desc_list) u_int8_t length[2]; u_int8_t desc_list[]; }; struct scsi_vpd_id_descriptor { u_int8_t proto_codeset; /* * See the SCSI_PROTO definitions above for the protocols. */ #define SVPD_ID_PROTO_SHIFT 4 #define SVPD_ID_CODESET_BINARY 0x01 #define SVPD_ID_CODESET_ASCII 0x02 #define SVPD_ID_CODESET_UTF8 0x03 #define SVPD_ID_CODESET_MASK 0x0f u_int8_t id_type; #define SVPD_ID_PIV 0x80 #define SVPD_ID_ASSOC_LUN 0x00 #define SVPD_ID_ASSOC_PORT 0x10 #define SVPD_ID_ASSOC_TARGET 0x20 #define SVPD_ID_ASSOC_MASK 0x30 #define SVPD_ID_TYPE_VENDOR 0x00 #define SVPD_ID_TYPE_T10 0x01 #define SVPD_ID_TYPE_EUI64 0x02 #define SVPD_ID_TYPE_NAA 0x03 #define SVPD_ID_TYPE_RELTARG 0x04 #define SVPD_ID_TYPE_TPORTGRP 0x05 #define SVPD_ID_TYPE_LUNGRP 0x06 #define SVPD_ID_TYPE_MD5_LUN_ID 0x07 #define SVPD_ID_TYPE_SCSI_NAME 0x08 #define SVPD_ID_TYPE_PROTO 0x09 #define SVPD_ID_TYPE_UUID 0x0a #define SVPD_ID_TYPE_MASK 0x0f u_int8_t reserved; u_int8_t length; #define SVPD_DEVICE_ID_DESC_HDR_LEN \ __offsetof(struct scsi_vpd_id_descriptor, identifier) u_int8_t identifier[]; }; struct scsi_vpd_id_t10 { u_int8_t vendor[8]; u_int8_t vendor_spec_id[0]; }; struct scsi_vpd_id_eui64 { u_int8_t ieee_company_id[3]; u_int8_t extension_id[5]; }; struct scsi_vpd_id_naa_basic { uint8_t naa; /* big endian, packed: uint8_t naa : 4; uint8_t naa_desig : 4; */ #define SVPD_ID_NAA_NAA_SHIFT 4 #define SVPD_ID_NAA_IEEE_EXT 0x02 #define SVPD_ID_NAA_LOCAL_REG 0x03 #define SVPD_ID_NAA_IEEE_REG 0x05 #define SVPD_ID_NAA_IEEE_REG_EXT 0x06 uint8_t naa_data[]; }; struct scsi_vpd_id_naa_ieee_extended_id { uint8_t naa; uint8_t vendor_specific_id_a; uint8_t ieee_company_id[3]; uint8_t vendor_specific_id_b[4]; }; struct scsi_vpd_id_naa_local_reg { uint8_t naa; uint8_t local_value[7]; }; struct scsi_vpd_id_naa_ieee_reg { uint8_t naa; uint8_t reg_value[7]; /* big endian, packed: uint8_t naa_basic : 4; uint8_t ieee_company_id_0 : 4; uint8_t ieee_company_id_1[2]; uint8_t ieee_company_id_2 : 4; uint8_t vendor_specific_id_0 : 4; uint8_t vendor_specific_id_1[4]; */ }; struct scsi_vpd_id_naa_ieee_reg_extended { uint8_t naa; uint8_t reg_value[15]; /* big endian, packed: uint8_t naa_basic : 4; uint8_t ieee_company_id_0 : 4; uint8_t ieee_company_id_1[2]; uint8_t ieee_company_id_2 : 4; uint8_t vendor_specific_id_0 : 4; uint8_t vendor_specific_id_1[4]; uint8_t vendor_specific_id_ext[8]; */ }; struct scsi_vpd_id_rel_trgt_port_id { uint8_t obsolete[2]; uint8_t rel_trgt_port_id[2]; }; struct scsi_vpd_id_trgt_port_grp_id { uint8_t reserved[2]; uint8_t trgt_port_grp[2]; }; struct scsi_vpd_id_lun_grp_id { uint8_t reserved[2]; uint8_t log_unit_grp[2]; }; struct scsi_vpd_id_md5_lun_id { uint8_t lun_id[16]; }; struct scsi_vpd_id_scsi_name { uint8_t name_string[256]; }; struct scsi_service_action_in { uint8_t opcode; uint8_t service_action; uint8_t action_dependent[13]; uint8_t control; };
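/*
 * Editor's note: an illustrative walk over the device ID page's designation
 * descriptor list, added for this review; it is not part of the original
 * header, and scsi_get_devid()/scsi_get_devid_desc(), declared below, are
 * the library's supported interface.  The 16-bit page length is assembled
 * by hand since scsi_2btoul() is only declared further down.
 */
static __inline struct scsi_vpd_id_descriptor *
scsi_vpd_devid_find(struct scsi_vpd_device_id *page, uint8_t assoc,
		    uint8_t id_type)
{
	struct scsi_vpd_id_descriptor *desc;
	uint8_t *cur, *end;

	cur = page->desc_list;
	end = cur + ((page->length[0] << 8) | page->length[1]);
	while (cur + SVPD_DEVICE_ID_DESC_HDR_LEN <= end) {
		desc = (struct scsi_vpd_id_descriptor *)cur;
		if ((desc->id_type & SVPD_ID_ASSOC_MASK) == assoc &&
		    (desc->id_type & SVPD_ID_TYPE_MASK) == id_type)
			return (desc);
		cur += SVPD_DEVICE_ID_DESC_HDR_LEN + desc->length;
	}
	return (NULL);
}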
struct scsi_vpd_extended_inquiry_data { uint8_t device; uint8_t page_code; #define SVPD_EXTENDED_INQUIRY_DATA 0x86 uint8_t page_length[2]; uint8_t flags1; /* These values are for direct access devices */ #define SVPD_EID_AM_MASK 0xC0 #define SVPD_EID_AM_DEFER 0x80 #define SVPD_EID_AM_IMMED 0x40 #define SVPD_EID_AM_UNDEFINED 0x00 #define SVPD_EID_AM_RESERVED 0xc0 #define SVPD_EID_SPT 0x38 #define SVPD_EID_SPT_1 0x00 #define SVPD_EID_SPT_12 0x08 #define SVPD_EID_SPT_2 0x10 #define SVPD_EID_SPT_13 0x18 #define SVPD_EID_SPT_3 0x20 #define SVPD_EID_SPT_23 0x28 #define SVPD_EID_SPT_123 0x38 /* These values are for sequential access devices */ #define SVPD_EID_SA_SPT_LBP 0x08 #define SVPD_EID_GRD_CHK 0x04 #define SVPD_EID_APP_CHK 0x02 #define SVPD_EID_REF_CHK 0x01 uint8_t flags2; #define SVPD_EID_UASK_SUP 0x20 #define SVPD_EID_GROUP_SUP 0x10 #define SVPD_EID_PRIOR_SUP 0x08 #define SVPD_EID_HEADSUP 0x04 #define SVPD_EID_ORDSUP 0x02 #define SVPD_EID_SIMPSUP 0x01 uint8_t flags3; #define SVPD_EID_WU_SUP 0x08 #define SVPD_EID_CRD_SUP 0x04 #define SVPD_EID_NV_SUP 0x02 #define SVPD_EID_V_SUP 0x01 uint8_t flags4; #define SVPD_EID_P_I_I_SUP 0x10 #define SVPD_EID_LUICLT 0x01 uint8_t flags5; #define SVPD_EID_R_SUP 0x10 #define SVPD_EID_CBCS 0x01 uint8_t flags6; #define SVPD_EID_MULTI_I_T_FW 0x0F #define SVPD_EID_MC_VENDOR_SPEC 0x00 #define SVPD_EID_MC_MODE_1 0x01 #define SVPD_EID_MC_MODE_2 0x02 #define SVPD_EID_MC_MODE_3 0x03 uint8_t est[2]; uint8_t flags7; #define SVPD_EID_POA_SUP 0x80 #define SVPD_EID_HRA_SUP 0x40 #define SVPD_EID_VSA_SUP 0x20 uint8_t max_sense_length; uint8_t reserved2[50]; }; struct scsi_vpd_mode_page_policy_descr { uint8_t page_code; uint8_t subpage_code; uint8_t policy; #define SVPD_MPP_SHARED 0x00 #define SVPD_MPP_PORT 0x01 #define SVPD_MPP_I_T 0x03 #define SVPD_MPP_MLUS 0x80 uint8_t reserved; }; struct scsi_vpd_mode_page_policy { uint8_t device; uint8_t page_code; #define SVPD_MODE_PAGE_POLICY 0x87 uint8_t page_length[2]; struct scsi_vpd_mode_page_policy_descr descr[0]; }; struct scsi_diag_page { uint8_t page_code; uint8_t page_specific_flags; uint8_t length[2]; uint8_t params[0]; }; struct scsi_vpd_port_designation { uint8_t reserved[2]; uint8_t relative_port_id[2]; uint8_t reserved2[2]; uint8_t initiator_transportid_length[2]; uint8_t initiator_transportid[0]; }; struct scsi_vpd_port_designation_cont { uint8_t reserved[2]; uint8_t target_port_descriptors_length[2]; struct scsi_vpd_id_descriptor target_port_descriptors[0]; }; struct scsi_vpd_scsi_ports { u_int8_t device; u_int8_t page_code; #define SVPD_SCSI_PORTS 0x88 u_int8_t page_length[2]; struct scsi_vpd_port_designation design[]; }; /* * ATA Information VPD Page based on * T10/2126-D Revision 04 */ #define SVPD_ATA_INFORMATION 0x89 struct scsi_vpd_tpc_descriptor { uint8_t desc_type[2]; uint8_t desc_length[2]; uint8_t parameters[]; }; struct scsi_vpd_tpc_descriptor_bdrl { uint8_t desc_type[2]; #define SVPD_TPC_BDRL 0x0000 uint8_t desc_length[2]; uint8_t vendor_specific[6]; uint8_t maximum_ranges[2]; uint8_t maximum_inactivity_timeout[4]; uint8_t default_inactivity_timeout[4]; uint8_t maximum_token_transfer_size[8]; uint8_t optimal_transfer_count[8]; }; struct scsi_vpd_tpc_descriptor_sc_descr { uint8_t opcode; uint8_t sa_length; uint8_t supported_service_actions[0]; }; struct scsi_vpd_tpc_descriptor_sc { uint8_t desc_type[2]; #define SVPD_TPC_SC 0x0001 uint8_t desc_length[2]; uint8_t list_length; struct scsi_vpd_tpc_descriptor_sc_descr descr[]; }; struct scsi_vpd_tpc_descriptor_pd { uint8_t desc_type[2]; #define SVPD_TPC_PD 0x0004 uint8_t desc_length[2]; uint8_t reserved[4]; uint8_t
maximum_cscd_descriptor_count[2]; uint8_t maximum_segment_descriptor_count[2]; uint8_t maximum_descriptor_list_length[4]; uint8_t maximum_inline_data_length[4]; uint8_t reserved2[12]; }; struct scsi_vpd_tpc_descriptor_sd { uint8_t desc_type[2]; #define SVPD_TPC_SD 0x0008 uint8_t desc_length[2]; uint8_t list_length; uint8_t supported_descriptor_codes[]; }; struct scsi_vpd_tpc_descriptor_sdid { uint8_t desc_type[2]; #define SVPD_TPC_SDID 0x000C uint8_t desc_length[2]; uint8_t list_length[2]; uint8_t supported_descriptor_ids[]; }; struct scsi_vpd_tpc_descriptor_rtf_block { uint8_t type_format; #define SVPD_TPC_RTF_BLOCK 0x00 uint8_t reserved; uint8_t desc_length[2]; uint8_t reserved2[2]; uint8_t optimal_length_granularity[2]; uint8_t maximum_bytes[8]; uint8_t optimal_bytes[8]; uint8_t optimal_bytes_to_token_per_segment[8]; uint8_t optimal_bytes_from_token_per_segment[8]; uint8_t reserved3[8]; }; struct scsi_vpd_tpc_descriptor_rtf { uint8_t desc_type[2]; #define SVPD_TPC_RTF 0x0106 uint8_t desc_length[2]; uint8_t remote_tokens; uint8_t reserved[11]; uint8_t minimum_token_lifetime[4]; uint8_t maximum_token_lifetime[4]; uint8_t maximum_token_inactivity_timeout[4]; uint8_t reserved2[18]; uint8_t type_specific_features_length[2]; uint8_t type_specific_features[0]; }; struct scsi_vpd_tpc_descriptor_srtd { uint8_t rod_type[4]; uint8_t flags; #define SVPD_TPC_SRTD_TOUT 0x01 #define SVPD_TPC_SRTD_TIN 0x02 #define SVPD_TPC_SRTD_ECPY 0x80 uint8_t reserved; uint8_t preference_indicator[2]; uint8_t reserved2[56]; }; struct scsi_vpd_tpc_descriptor_srt { uint8_t desc_type[2]; #define SVPD_TPC_SRT 0x0108 uint8_t desc_length[2]; uint8_t reserved[2]; uint8_t rod_type_descriptors_length[2]; uint8_t rod_type_descriptors[0]; }; struct scsi_vpd_tpc_descriptor_gco { uint8_t desc_type[2]; #define SVPD_TPC_GCO 0x8001 uint8_t desc_length[2]; uint8_t total_concurrent_copies[4]; uint8_t maximum_identified_concurrent_copies[4]; uint8_t maximum_segment_length[4]; uint8_t data_segment_granularity; uint8_t inline_data_granularity; uint8_t reserved[18]; }; struct scsi_vpd_tpc { uint8_t device; uint8_t page_code; #define SVPD_SCSI_TPC 0x8F uint8_t page_length[2]; struct scsi_vpd_tpc_descriptor descr[]; }; /* * Block Device Characteristics VPD Page based on * T10/1799-D Revision 31 */ struct scsi_vpd_block_characteristics { u_int8_t device; u_int8_t page_code; #define SVPD_BDC 0xB1 u_int8_t page_length[2]; u_int8_t medium_rotation_rate[2]; #define SVPD_BDC_RATE_NOT_REPORTED 0x00 #define SVPD_BDC_RATE_NON_ROTATING 0x01 u_int8_t reserved1; u_int8_t nominal_form_factor; #define SVPD_BDC_FORM_NOT_REPORTED 0x00 #define SVPD_BDC_FORM_5_25INCH 0x01 #define SVPD_BDC_FORM_3_5INCH 0x02 #define SVPD_BDC_FORM_2_5INCH 0x03 #define SVPD_BDC_FORM_1_5INCH 0x04 #define SVPD_BDC_FORM_LESSTHAN_1_5INCH 0x05 u_int8_t reserved2[56]; }; /* * Block Device Characteristics VPD Page */ struct scsi_vpd_block_device_characteristics { uint8_t device; uint8_t page_code; #define SVPD_BDC 0xB1 uint8_t page_length[2]; uint8_t medium_rotation_rate[2]; #define SVPD_NOT_REPORTED 0x0000 #define SVPD_NON_ROTATING 0x0001 uint8_t product_type; uint8_t wab_wac_ff; uint8_t flags; #define SVPD_VBULS 0x01 #define SVPD_FUAB 0x02 #define SVPD_HAW_ZBC 0x10 uint8_t reserved[55]; }; /* * Logical Block Provisioning VPD Page based on * T10/1799-D Revision 31 */ struct scsi_vpd_logical_block_prov { u_int8_t device; u_int8_t page_code; #define SVPD_LBP 0xB2 u_int8_t page_length[2]; #define SVPD_LBP_PL_BASIC 0x04 u_int8_t threshold_exponent; u_int8_t flags; #define SVPD_LBP_UNMAP 
0x80 #define SVPD_LBP_WS16 0x40 #define SVPD_LBP_WS10 0x20 #define SVPD_LBP_RZ 0x04 #define SVPD_LBP_ANC_SUP 0x02 #define SVPD_LBP_DP 0x01 u_int8_t prov_type; #define SVPD_LBP_RESOURCE 0x01 #define SVPD_LBP_THIN 0x02 u_int8_t reserved; /* * Provisioning Group Descriptor can be here if SVPD_LBP_DP is set * Its size can be determined from page_length - 4 */ }; /* * Block Limits VPD Page based on SBC-4 Revision 2 */ struct scsi_vpd_block_limits { u_int8_t device; u_int8_t page_code; #define SVPD_BLOCK_LIMITS 0xB0 u_int8_t page_length[2]; #define SVPD_BL_PL_BASIC 0x10 #define SVPD_BL_PL_TP 0x3C u_int8_t reserved1; u_int8_t max_cmp_write_len; u_int8_t opt_txfer_len_grain[2]; u_int8_t max_txfer_len[4]; u_int8_t opt_txfer_len[4]; u_int8_t max_prefetch[4]; u_int8_t max_unmap_lba_cnt[4]; u_int8_t max_unmap_blk_cnt[4]; u_int8_t opt_unmap_grain[4]; u_int8_t unmap_grain_align[4]; u_int8_t max_write_same_length[8]; u_int8_t max_atomic_transfer_length[4]; u_int8_t atomic_alignment[4]; u_int8_t atomic_transfer_length_granularity[4]; u_int8_t max_atomic_transfer_length_with_atomic_boundary[4]; u_int8_t max_atomic_boundary_size[4]; }; struct scsi_read_capacity { u_int8_t opcode; u_int8_t byte2; #define SRC_RELADR 0x01 u_int8_t addr[4]; u_int8_t unused[2]; u_int8_t pmi; #define SRC_PMI 0x01 u_int8_t control; }; struct scsi_read_capacity_16 { uint8_t opcode; #define SRC16_SERVICE_ACTION 0x10 uint8_t service_action; uint8_t addr[8]; uint8_t alloc_len[4]; #define SRC16_PMI 0x01 #define SRC16_RELADR 0x02 uint8_t reladr; uint8_t control; }; struct scsi_read_capacity_data { u_int8_t addr[4]; u_int8_t length[4]; }; struct scsi_read_capacity_data_long { uint8_t addr[8]; uint8_t length[4]; #define SRC16_PROT_EN 0x01 #define SRC16_P_TYPE 0x0e #define SRC16_PTYPE_1 0x00 #define SRC16_PTYPE_2 0x02 #define SRC16_PTYPE_3 0x04 uint8_t prot; #define SRC16_LBPPBE 0x0f #define SRC16_PI_EXPONENT 0xf0 #define SRC16_PI_EXPONENT_SHIFT 4 uint8_t prot_lbppbe; #define SRC16_LALBA 0x3f #define SRC16_LBPRZ 0x40 #define SRC16_LBPME 0x80 /* * Alternate versions of these macros that are intended for use on a 16-bit * version of the lalba_lbp field instead of the array of two 8-bit numbers.
*/ #define SRC16_LALBA_A 0x3fff #define SRC16_LBPRZ_A 0x4000 #define SRC16_LBPME_A 0x8000 uint8_t lalba_lbp[2]; uint8_t reserved[16]; }; struct scsi_get_lba_status { uint8_t opcode; #define SGLS_SERVICE_ACTION 0x12 uint8_t service_action; uint8_t addr[8]; uint8_t alloc_len[4]; uint8_t reserved; uint8_t control; }; struct scsi_get_lba_status_data_descr { uint8_t addr[8]; uint8_t length[4]; uint8_t status; uint8_t reserved[3]; }; struct scsi_get_lba_status_data { uint8_t length[4]; uint8_t reserved[4]; struct scsi_get_lba_status_data_descr descr[]; }; struct scsi_report_luns { uint8_t opcode; uint8_t reserved1; #define RPL_REPORT_DEFAULT 0x00 #define RPL_REPORT_WELLKNOWN 0x01 #define RPL_REPORT_ALL 0x02 #define RPL_REPORT_ADMIN 0x10 #define RPL_REPORT_NONSUBSID 0x11 #define RPL_REPORT_CONGLOM 0x12 uint8_t select_report; uint8_t reserved2[3]; uint8_t length[4]; uint8_t reserved3; uint8_t control; }; struct scsi_report_luns_lundata { uint8_t lundata[8]; #define RPL_LUNDATA_PERIPH_BUS_MASK 0x3f #define RPL_LUNDATA_FLAT_LUN_MASK 0x3f #define RPL_LUNDATA_FLAT_LUN_BITS 0x06 #define RPL_LUNDATA_LUN_TARG_MASK 0x3f #define RPL_LUNDATA_LUN_BUS_MASK 0xe0 #define RPL_LUNDATA_LUN_LUN_MASK 0x1f #define RPL_LUNDATA_EXT_LEN_MASK 0x30 #define RPL_LUNDATA_EXT_EAM_MASK 0x0f #define RPL_LUNDATA_EXT_EAM_WK 0x01 #define RPL_LUNDATA_EXT_EAM_NOT_SPEC 0x0f #define RPL_LUNDATA_ATYP_MASK 0xc0 /* MBZ for type 0 lun */ #define RPL_LUNDATA_ATYP_PERIPH 0x00 #define RPL_LUNDATA_ATYP_FLAT 0x40 #define RPL_LUNDATA_ATYP_LUN 0x80 #define RPL_LUNDATA_ATYP_EXTLUN 0xc0 }; struct scsi_report_luns_data { u_int8_t length[4]; /* length of LUN inventory, in bytes */ u_int8_t reserved[4]; /* unused */ /* * LUN inventory- we only support the type zero form for now. */ struct scsi_report_luns_lundata luns[0]; }; struct scsi_target_group { uint8_t opcode; uint8_t service_action; #define STG_PDF_MASK 0xe0 #define STG_PDF_LENGTH 0x00 #define STG_PDF_EXTENDED 0x20 uint8_t reserved1[4]; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_target_port_descriptor { uint8_t reserved[2]; uint8_t relative_target_port_identifier[2]; uint8_t desc_list[]; }; struct scsi_target_port_group_descriptor { uint8_t pref_state; #define TPG_PRIMARY 0x80 #define TPG_ASYMMETRIC_ACCESS_STATE_MASK 0xf #define TPG_ASYMMETRIC_ACCESS_OPTIMIZED 0x0 #define TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED 0x1 #define TPG_ASYMMETRIC_ACCESS_STANDBY 0x2 #define TPG_ASYMMETRIC_ACCESS_UNAVAILABLE 0x3 #define TPG_ASYMMETRIC_ACCESS_LBA_DEPENDENT 0x4 #define TPG_ASYMMETRIC_ACCESS_OFFLINE 0xE #define TPG_ASYMMETRIC_ACCESS_TRANSITIONING 0xF uint8_t support; #define TPG_AO_SUP 0x01 #define TPG_AN_SUP 0x02 #define TPG_S_SUP 0x04 #define TPG_U_SUP 0x08 #define TPG_LBD_SUP 0x10 #define TPG_O_SUP 0x40 #define TPG_T_SUP 0x80 uint8_t target_port_group[2]; uint8_t reserved; uint8_t status; #define TPG_UNAVLBL 0 #define TPG_SET_BY_STPG 0x01 #define TPG_IMPLICIT 0x02 uint8_t vendor_specific; uint8_t target_port_count; struct scsi_target_port_descriptor descriptors[]; }; struct scsi_target_group_data { uint8_t length[4]; /* length of returned data, in bytes */ struct scsi_target_port_group_descriptor groups[]; }; struct scsi_target_group_data_extended { uint8_t length[4]; /* length of returned data, in bytes */ uint8_t format_type; /* STG_PDF_LENGTH or STG_PDF_EXTENDED */ uint8_t implicit_transition_time; uint8_t reserved[2]; struct scsi_target_port_group_descriptor groups[]; };
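/*
 * Editor's note: a minimal consumer sketch added for this review, not part
 * of the original header.  The REPORT LUNS length field above counts bytes
 * of LUN data, eight bytes per entry; the 32-bit big-endian value is
 * assembled by hand because scsi_4btoul() is only declared further down.
 */
static __inline uint32_t
scsi_report_luns_nentries(const struct scsi_report_luns_data *rpl)
{
	uint32_t len;

	len = (rpl->length[0] << 24) | (rpl->length[1] << 16) |
	    (rpl->length[2] << 8) | rpl->length[3];
	return (len / sizeof(struct scsi_report_luns_lundata));
}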
struct scsi_security_protocol_in { uint8_t opcode; uint8_t security_protocol; #define SPI_PROT_INFORMATION 0x00 #define SPI_PROT_CBCS 0x07 #define SPI_PROT_TAPE_DATA_ENC 0x20 #define SPI_PROT_DATA_ENC_CONFIG 0x21 #define SPI_PROT_SA_CREATE_CAP 0x40 #define SPI_PROT_IKEV2_SCSI 0x41 #define SPI_PROT_JEDEC_UFS 0xEC #define SPI_PROT_SDCARD_TFSSS 0xED #define SPI_PROT_AUTH_HOST_TRANSIENT 0xEE #define SPI_PROT_ATA_DEVICE_PASSWORD 0xEF uint8_t security_protocol_specific[2]; uint8_t byte4; #define SPI_INC_512 0x80 uint8_t reserved1; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_security_protocol_out { uint8_t opcode; uint8_t security_protocol; uint8_t security_protocol_specific[2]; uint8_t byte4; #define SPO_INC_512 0x80 uint8_t reserved1; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; typedef enum { SSD_TYPE_NONE, SSD_TYPE_FIXED, SSD_TYPE_DESC } scsi_sense_data_type; typedef enum { SSD_ELEM_NONE, SSD_ELEM_SKIP, SSD_ELEM_DESC, SSD_ELEM_SKS, SSD_ELEM_COMMAND, SSD_ELEM_INFO, SSD_ELEM_FRU, SSD_ELEM_STREAM, SSD_ELEM_MAX } scsi_sense_elem_type; struct scsi_sense_data { uint8_t error_code; /* * SPC-4 says that the maximum length of sense data is 252 bytes. * So this structure is exactly 252 bytes long. */ #define SSD_FULL_SIZE 252 uint8_t sense_buf[SSD_FULL_SIZE - 1]; /* * XXX KDM is this still a reasonable minimum size? */ #define SSD_MIN_SIZE 18 /* * Maximum value for the extra_len field in the sense data. */ #define SSD_EXTRA_MAX 244 }; /* * Fixed format sense data. */ struct scsi_sense_data_fixed { u_int8_t error_code; #define SSD_ERRCODE 0x7F #define SSD_CURRENT_ERROR 0x70 #define SSD_DEFERRED_ERROR 0x71 #define SSD_ERRCODE_VALID 0x80 u_int8_t segment; u_int8_t flags; #define SSD_KEY 0x0F #define SSD_KEY_NO_SENSE 0x00 #define SSD_KEY_RECOVERED_ERROR 0x01 #define SSD_KEY_NOT_READY 0x02 #define SSD_KEY_MEDIUM_ERROR 0x03 #define SSD_KEY_HARDWARE_ERROR 0x04 #define SSD_KEY_ILLEGAL_REQUEST 0x05 #define SSD_KEY_UNIT_ATTENTION 0x06 #define SSD_KEY_DATA_PROTECT 0x07 #define SSD_KEY_BLANK_CHECK 0x08 #define SSD_KEY_Vendor_Specific 0x09 #define SSD_KEY_COPY_ABORTED 0x0a #define SSD_KEY_ABORTED_COMMAND 0x0b #define SSD_KEY_EQUAL 0x0c #define SSD_KEY_VOLUME_OVERFLOW 0x0d #define SSD_KEY_MISCOMPARE 0x0e #define SSD_KEY_COMPLETED 0x0f #define SSD_ILI 0x20 #define SSD_EOM 0x40 #define SSD_FILEMARK 0x80 u_int8_t info[4]; u_int8_t extra_len; u_int8_t cmd_spec_info[4]; u_int8_t add_sense_code; u_int8_t add_sense_code_qual; u_int8_t fru; u_int8_t sense_key_spec[3]; #define SSD_SCS_VALID 0x80 #define SSD_FIELDPTR_CMD 0x40 #define SSD_BITPTR_VALID 0x08 #define SSD_BITPTR_VALUE 0x07 u_int8_t extra_bytes[14]; #define SSD_FIXED_IS_PRESENT(sense, length, field) \ ((length >= (offsetof(struct scsi_sense_data_fixed, field) + \ sizeof(sense->field))) ? 1 : 0) #define SSD_FIXED_IS_FILLED(sense, field) \ ((((offsetof(struct scsi_sense_data_fixed, field) + \ sizeof(sense->field)) - \ (offsetof(struct scsi_sense_data_fixed, extra_len) + \ sizeof(sense->extra_len))) <= sense->extra_len) ? 1 : 0) };
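/*
 * Editor's note: a hedged decode sketch added for this review, not part of
 * the original header; scsi_extract_sense_len(), declared below, is the
 * library's supported way to do this.  For fixed-format sense data the
 * basic decode is:
 */
static __inline void
scsi_sense_fixed_decode(const struct scsi_sense_data_fixed *sense,
			int *sense_key, int *asc, int *ascq)
{

	*sense_key = sense->flags & SSD_KEY;
	/* The ASC/ASCQ bytes are only valid if extra_len covers them. */
	*asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
	*ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
}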
/* * Descriptor format sense data definitions. * Introduced in SPC-3. */ struct scsi_sense_data_desc { uint8_t error_code; #define SSD_DESC_CURRENT_ERROR 0x72 #define SSD_DESC_DEFERRED_ERROR 0x73 uint8_t sense_key; uint8_t add_sense_code; uint8_t add_sense_code_qual; uint8_t reserved[3]; /* * Note that SPC-4, section 4.5.2.1 says that the extra_len field * must be less than or equal to 244. */ uint8_t extra_len; uint8_t sense_desc[0]; #define SSD_DESC_IS_PRESENT(sense, length, field) \ ((length >= (offsetof(struct scsi_sense_data_desc, field) + \ sizeof(sense->field))) ? 1 : 0) }; struct scsi_sense_desc_header { uint8_t desc_type; uint8_t length; }; /* * The information provided in the Information descriptor is device type or * command specific information, and is defined in a command standard. * * Note that any changes to the field names or positions in this structure, * even reserved fields, should be accompanied by an examination of the * code in ctl_set_sense() that uses them. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_info { uint8_t desc_type; #define SSD_DESC_INFO 0x00 uint8_t length; uint8_t byte2; #define SSD_INFO_VALID 0x80 uint8_t reserved; uint8_t info[8]; }; /* * Command-specific information depends on the command for which the * reported condition occurred. * * Note that any changes to the field names or positions in this structure, * even reserved fields, should be accompanied by an examination of the * code in ctl_set_sense() that uses them. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_command { uint8_t desc_type; #define SSD_DESC_COMMAND 0x01 uint8_t length; uint8_t reserved[2]; uint8_t command_info[8]; }; /* * Sense key specific descriptor. The sense key specific data format * depends on the sense key in question. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_sks { uint8_t desc_type; #define SSD_DESC_SKS 0x02 uint8_t length; uint8_t reserved1[2]; uint8_t sense_key_spec[3]; #define SSD_SKS_VALID 0x80 uint8_t reserved2; }; /* * This is used for the Illegal Request sense key (0x05) only. */ struct scsi_sense_sks_field { uint8_t byte0; #define SSD_SKS_FIELD_VALID 0x80 #define SSD_SKS_FIELD_CMD 0x40 #define SSD_SKS_BPV 0x08 #define SSD_SKS_BIT_VALUE 0x07 uint8_t field[2]; }; /* * This is used for the Hardware Error (0x04), Medium Error (0x03) and * Recovered Error (0x01) sense keys. */ struct scsi_sense_sks_retry { uint8_t byte0; #define SSD_SKS_RETRY_VALID 0x80 uint8_t actual_retry_count[2]; }; /* * Used with the NO Sense (0x00) or Not Ready (0x02) sense keys. */ struct scsi_sense_sks_progress { uint8_t byte0; #define SSD_SKS_PROGRESS_VALID 0x80 uint8_t progress[2]; #define SSD_SKS_PROGRESS_DENOM 0x10000 }; /* * Used with the Copy Aborted (0x0a) sense key. */ struct scsi_sense_sks_segment { uint8_t byte0; #define SSD_SKS_SEGMENT_VALID 0x80 #define SSD_SKS_SEGMENT_SD 0x20 #define SSD_SKS_SEGMENT_BPV 0x08 #define SSD_SKS_SEGMENT_BITPTR 0x07 uint8_t field[2]; }; /* * Used with the Unit Attention (0x06) sense key. * * This is currently used to indicate that the unit attention condition * queue has overflowed (when the overflow bit is set). */ struct scsi_sense_sks_overflow { uint8_t byte0; #define SSD_SKS_OVERFLOW_VALID 0x80 #define SSD_SKS_OVERFLOW_SET 0x01 uint8_t reserved[2]; }; /* * This specifies which component is associated with the sense data. There * is no standard meaning for the fru value. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_fru { uint8_t desc_type; #define SSD_DESC_FRU 0x03 uint8_t length; uint8_t reserved; uint8_t fru; }; /* * Used for Stream commands, defined in SSC-4. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_stream { uint8_t desc_type; #define SSD_DESC_STREAM 0x04 uint8_t length; uint8_t reserved; uint8_t byte3; #define SSD_DESC_STREAM_FM 0x80 #define SSD_DESC_STREAM_EOM 0x40 #define SSD_DESC_STREAM_ILI 0x20 };
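/*
 * Editor's note: an illustrative walk over descriptor-format sense data,
 * added for this review and not part of the original header;
 * scsi_find_desc() and scsi_desc_iterate(), declared below, are the
 * library's supported interface.  Each descriptor is a
 * scsi_sense_desc_header followed by "length" bytes of payload.
 */
static __inline struct scsi_sense_desc_header *
scsi_sense_desc_search(struct scsi_sense_data_desc *sense, uint8_t desc_type)
{
	struct scsi_sense_desc_header *hdr;
	uint8_t *cur, *end;

	cur = sense->sense_desc;
	end = cur + sense->extra_len;
	while (cur + sizeof(*hdr) <= end) {
		hdr = (struct scsi_sense_desc_header *)cur;
		if (hdr->desc_type == desc_type)
			return (hdr);
		cur += sizeof(*hdr) + hdr->length;
	}
	return (NULL);
}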
/* * Used for Block commands, defined in SBC-3. * * This is currently (as of SBC-3) only used for the Incorrect Length * Indication (ILI) bit, which says that the data length requested in the * READ LONG or WRITE LONG command did not match the length of the logical * block. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_block { uint8_t desc_type; #define SSD_DESC_BLOCK 0x05 uint8_t length; uint8_t reserved; uint8_t byte3; #define SSD_DESC_BLOCK_ILI 0x20 }; /* * Used for Object-Based Storage Devices (OSD-3). * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_osd_objid { uint8_t desc_type; #define SSD_DESC_OSD_OBJID 0x06 uint8_t length; uint8_t reserved[6]; /* * XXX KDM provide the bit definitions here? There are a lot of * them, and we don't have an OSD driver yet. */ uint8_t not_init_cmds[4]; uint8_t completed_cmds[4]; uint8_t partition_id[8]; uint8_t object_id[8]; }; /* * Used for Object-Based Storage Devices (OSD-3). * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_osd_integrity { uint8_t desc_type; #define SSD_DESC_OSD_INTEGRITY 0x07 uint8_t length; uint8_t integ_check_val[32]; }; /* * Used for Object-Based Storage Devices (OSD-3). * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_osd_attr_id { uint8_t desc_type; #define SSD_DESC_OSD_ATTR_ID 0x08 uint8_t length; uint8_t reserved[2]; uint8_t attr_desc[0]; }; /* * Used with Sense keys No Sense (0x00) and Not Ready (0x02). * * Maximum descriptors allowed: 32 (as of SPC-4) */ struct scsi_sense_progress { uint8_t desc_type; #define SSD_DESC_PROGRESS 0x0a uint8_t length; uint8_t sense_key; uint8_t add_sense_code; uint8_t add_sense_code_qual; uint8_t reserved; uint8_t progress[2]; }; /* * This is typically forwarded as the result of an EXTENDED COPY command. * * Maximum descriptors allowed: 2 (as of SPC-4) */ struct scsi_sense_forwarded { uint8_t desc_type; #define SSD_DESC_FORWARDED 0x0c uint8_t length; uint8_t byte2; #define SSD_FORWARDED_FSDT 0x80 #define SSD_FORWARDED_SDS_MASK 0x0f #define SSD_FORWARDED_SDS_UNK 0x00 #define SSD_FORWARDED_SDS_EXSRC 0x01 #define SSD_FORWARDED_SDS_EXDST 0x02 }; /* * Vendor-specific sense descriptor. The desc_type field will be in the * range between MIN and MAX inclusive.
*/ struct scsi_sense_vendor { uint8_t desc_type; #define SSD_DESC_VENDOR_MIN 0x80 #define SSD_DESC_VENDOR_MAX 0xff uint8_t length; uint8_t data[0]; }; struct scsi_mode_header_6 { u_int8_t data_length; /* Sense data length */ u_int8_t medium_type; u_int8_t dev_spec; u_int8_t blk_desc_len; }; struct scsi_mode_header_10 { u_int8_t data_length[2];/* Sense data length */ u_int8_t medium_type; u_int8_t dev_spec; u_int8_t unused[2]; u_int8_t blk_desc_len[2]; }; struct scsi_mode_page_header { u_int8_t page_code; #define SMPH_PS 0x80 #define SMPH_SPF 0x40 #define SMPH_PC_MASK 0x3f u_int8_t page_length; }; struct scsi_mode_page_header_sp { uint8_t page_code; uint8_t subpage; uint8_t page_length[2]; }; struct scsi_mode_blk_desc { u_int8_t density; u_int8_t nblocks[3]; u_int8_t reserved; u_int8_t blklen[3]; }; #define SCSI_DEFAULT_DENSITY 0x00 /* use 'default' density */ #define SCSI_SAME_DENSITY 0x7f /* use 'same' density- >= SCSI-2 only */ /* * Status Byte */ #define SCSI_STATUS_OK 0x00 #define SCSI_STATUS_CHECK_COND 0x02 #define SCSI_STATUS_COND_MET 0x04 #define SCSI_STATUS_BUSY 0x08 #define SCSI_STATUS_INTERMED 0x10 #define SCSI_STATUS_INTERMED_COND_MET 0x14 #define SCSI_STATUS_RESERV_CONFLICT 0x18 #define SCSI_STATUS_CMD_TERMINATED 0x22 /* Obsolete in SAM-2 */ #define SCSI_STATUS_QUEUE_FULL 0x28 #define SCSI_STATUS_ACA_ACTIVE 0x30 #define SCSI_STATUS_TASK_ABORTED 0x40 struct scsi_inquiry_pattern { u_int8_t type; u_int8_t media_type; #define SIP_MEDIA_REMOVABLE 0x01 #define SIP_MEDIA_FIXED 0x02 const char *vendor; const char *product; const char *revision; }; struct scsi_static_inquiry_pattern { u_int8_t type; u_int8_t media_type; char vendor[SID_VENDOR_SIZE+1]; char product[SID_PRODUCT_SIZE+1]; char revision[SID_REVISION_SIZE+1]; }; struct scsi_sense_quirk_entry { struct scsi_inquiry_pattern inq_pat; int num_sense_keys; int num_ascs; struct sense_key_table_entry *sense_key_info; struct asc_table_entry *asc_info; }; struct sense_key_table_entry { u_int8_t sense_key; u_int32_t action; const char *desc; }; struct asc_table_entry { u_int8_t asc; u_int8_t ascq; u_int32_t action; const char *desc; }; struct op_table_entry { u_int8_t opcode; u_int32_t opmask; const char *desc; }; struct scsi_op_quirk_entry { struct scsi_inquiry_pattern inq_pat; int num_ops; struct op_table_entry *op_table; }; typedef enum { SSS_FLAG_NONE = 0x00, SSS_FLAG_PRINT_COMMAND = 0x01 } scsi_sense_string_flags; struct scsi_nv { const char *name; uint64_t value; }; typedef enum { SCSI_NV_FOUND, SCSI_NV_AMBIGUOUS, SCSI_NV_NOT_FOUND } scsi_nv_status; typedef enum { SCSI_NV_FLAG_NONE = 0x00, SCSI_NV_FLAG_IG_CASE = 0x01 /* Case insensitive comparison */ } scsi_nv_flags; struct ccb_scsiio; struct cam_periph; union ccb; #ifndef _KERNEL struct cam_device; #endif extern const char *scsi_sense_key_text[]; __BEGIN_DECLS void scsi_sense_desc(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const char **sense_key_desc, const char **asc_desc); scsi_sense_action scsi_error_action(struct ccb_scsiio* csio, struct scsi_inquiry_data *inq_data, u_int32_t sense_flags); const char * scsi_status_string(struct ccb_scsiio *csio); void scsi_desc_iterate(struct scsi_sense_data_desc *sense, u_int sense_len, int (*iter_func)(struct scsi_sense_data_desc *sense, u_int, struct scsi_sense_desc_header *, void *), void *arg); uint8_t *scsi_find_desc(struct scsi_sense_data_desc *sense, u_int sense_len, uint8_t desc_type); void scsi_set_sense_data(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int 
sense_key, int asc, int ascq, ...) ; void scsi_set_sense_data_va(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap); int scsi_get_sense_info(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t info_type, uint64_t *info, int64_t *signed_info); int scsi_get_sks(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t *sks); int scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *block_bits); int scsi_get_stream_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *stream_bits); void scsi_info_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t info); void scsi_command_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t csi); void scsi_progress_sbuf(struct sbuf *sb, uint16_t progress); int scsi_sks_sbuf(struct sbuf *sb, int sense_key, uint8_t *sks); void scsi_fru_sbuf(struct sbuf *sb, uint64_t fru); void scsi_stream_sbuf(struct sbuf *sb, uint8_t stream_bits, uint64_t info); void scsi_block_sbuf(struct sbuf *sb, uint8_t block_bits, uint64_t info); void scsi_sense_info_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_command_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_sks_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_fru_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_stream_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_block_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_desc_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); scsi_sense_data_type scsi_sense_type(struct scsi_sense_data *sense_data); void scsi_sense_only_sbuf(struct scsi_sense_data *sense, u_int sense_len, struct sbuf *sb, char *path_str, struct scsi_inquiry_data *inq_data, uint8_t *cdb, int cdb_len); #ifdef _KERNEL int scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb); int scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags); char * scsi_sense_string(struct ccb_scsiio *csio, char *str, int str_len); void scsi_sense_print(struct ccb_scsiio 
*csio); int scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id); #else /* _KERNEL */ int scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb); int scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags); char * scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio, char *str, int str_len); void scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio, FILE *ofile); #endif /* _KERNEL */ const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data); char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len); +void scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb); void scsi_print_inquiry(struct scsi_inquiry_data *inq_data); void scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data); u_int scsi_calc_syncsrate(u_int period_factor); u_int scsi_calc_syncparam(u_int period); typedef int (*scsi_devid_checkfn_t)(uint8_t *); int scsi_devid_is_naa_ieee_reg(uint8_t *bufp); int scsi_devid_is_sas_target(uint8_t *bufp); int scsi_devid_is_lun_eui64(uint8_t *bufp); int scsi_devid_is_lun_naa(uint8_t *bufp); int scsi_devid_is_lun_name(uint8_t *bufp); int scsi_devid_is_lun_t10(uint8_t *bufp); struct scsi_vpd_id_descriptor * scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len, scsi_devid_checkfn_t ck_fn); struct scsi_vpd_id_descriptor * scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len, scsi_devid_checkfn_t ck_fn); int scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr, uint32_t valid_len); const char * scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value); scsi_nv_status scsi_get_nv(struct scsi_nv *table, int num_table_entries, char *name, int *table_entry, scsi_nv_flags flags); int scsi_parse_transportid_64bit(int proto_id, char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str,int error_str_len); int scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str,int error_str_len); int scsi_parse_transportid(char *transportid_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_attrib_volcoh_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_vendser_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_hexdump_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, 
uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_ascii_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_text_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); struct scsi_attrib_table_entry *scsi_find_attrib_entry( struct scsi_attrib_table_entry *table, size_t num_table_entries, uint32_t id); struct scsi_attrib_table_entry *scsi_get_attrib_entry(uint32_t id); int scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len, struct scsi_mam_attribute_header *hdr, uint32_t output_flags, char *error_str, size_t error_str_len); void scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, const char *desc); int scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, struct scsi_attrib_table_entry *user_table, size_t num_user_entries, int prefer_user_table, uint32_t output_flags, char *error_str, int error_str_len); void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout); void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout); void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len, int evpd, u_int8_t page_code, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout); void scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, u_int8_t page, int save_pages, int ppc, u_int32_t paramptr, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, int 
save_pages, int pc_reset, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t action, u_int8_t sense_len, u_int32_t timeout); void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_capacity_data *, u_int8_t sense_len, u_int32_t timeout); void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint64_t lba, int reladr, int pmi, uint8_t *rcap_buf, int rcap_buf_len, uint8_t sense_len, uint32_t timeout); void scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t select_report, struct scsi_report_luns_data *rpl_buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout); void scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t pdf, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout); void scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout); void scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t begin_lba, u_int16_t lb_count, u_int8_t sense_len, u_int32_t timeout); void scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t allocation_length, uint8_t sense_len, uint32_t timeout); void scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int unit_offline, int device_offline, int self_test, int page_format, int self_test_code, uint8_t *data_ptr, uint16_t param_list_length, uint8_t sense_len, uint32_t timeout); void scsi_read_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t allocation_length, uint8_t sense_len, uint32_t timeout); void scsi_write_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t param_list_length, uint8_t sense_len, uint32_t timeout); #define SCSI_RW_READ 0x0001 #define SCSI_RW_WRITE 0x0002 #define SCSI_RW_DIRMASK 0x0003 #define SCSI_RW_BIO 0x1000 void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_write_same(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_ata_identify(struct 
ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t block_count, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t protocol, u_int8_t ata_flags, u_int16_t features, u_int16_t sector_count, uint64_t lba, u_int8_t command, u_int8_t control, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_unmap(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int start, int load_eject, int immediate, u_int8_t sense_len, u_int32_t timeout); void scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t service_action, uint32_t element, u_int8_t elem_type, int logical_volume, int partition, u_int32_t first_attribute, int cache, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout); void scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, uint32_t element, int logical_volume, int partition, int wtc, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout); void scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint32_t security_protocol, uint32_t security_protocol_specific, int byte4, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *,union ccb *), uint8_t tag_action, uint32_t security_protocol, uint32_t security_protocol_specific, int byte4, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *,union ccb *), uint8_t tag_action, int service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int service_action, int scope, int res_type, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int options, int req_opcode, int req_service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry); int scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry); int scsi_devid_match(uint8_t *rhs, size_t rhs_len, uint8_t *lhs, size_t lhs_len); void scsi_extract_sense(struct scsi_sense_data *sense, int *error_code, int *sense_key, int *asc, int *ascq); int 
scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq); void scsi_extract_sense_len(struct scsi_sense_data *sense, u_int sense_len, int *error_code, int *sense_key, int *asc, int *ascq, int show_errors); int scsi_get_sense_key(struct scsi_sense_data *sense, u_int sense_len, int show_errors); int scsi_get_asc(struct scsi_sense_data *sense, u_int sense_len, int show_errors); int scsi_get_ascq(struct scsi_sense_data *sense, u_int sense_len, int show_errors); static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes); static __inline void scsi_ulto3b(u_int32_t val, u_int8_t *bytes); static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes); static __inline void scsi_u64to8b(u_int64_t val, u_int8_t *bytes); static __inline uint32_t scsi_2btoul(const uint8_t *bytes); static __inline uint32_t scsi_3btoul(const uint8_t *bytes); static __inline int32_t scsi_3btol(const uint8_t *bytes); static __inline uint32_t scsi_4btoul(const uint8_t *bytes); static __inline uint64_t scsi_8btou64(const uint8_t *bytes); static __inline void *find_mode_page_6(struct scsi_mode_header_6 *mode_header); static __inline void *find_mode_page_10(struct scsi_mode_header_10 *mode_header); static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes) { bytes[0] = (val >> 8) & 0xff; bytes[1] = val & 0xff; } static __inline void scsi_ulto3b(u_int32_t val, u_int8_t *bytes) { bytes[0] = (val >> 16) & 0xff; bytes[1] = (val >> 8) & 0xff; bytes[2] = val & 0xff; } static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes) { bytes[0] = (val >> 24) & 0xff; bytes[1] = (val >> 16) & 0xff; bytes[2] = (val >> 8) & 0xff; bytes[3] = val & 0xff; } static __inline void scsi_u64to8b(u_int64_t val, u_int8_t *bytes) { bytes[0] = (val >> 56) & 0xff; bytes[1] = (val >> 48) & 0xff; bytes[2] = (val >> 40) & 0xff; bytes[3] = (val >> 32) & 0xff; bytes[4] = (val >> 24) & 0xff; bytes[5] = (val >> 16) & 0xff; bytes[6] = (val >> 8) & 0xff; bytes[7] = val & 0xff; } static __inline uint32_t scsi_2btoul(const uint8_t *bytes) { uint32_t rv; rv = (bytes[0] << 8) | bytes[1]; return (rv); } static __inline uint32_t scsi_3btoul(const uint8_t *bytes) { uint32_t rv; rv = (bytes[0] << 16) | (bytes[1] << 8) | bytes[2]; return (rv); } static __inline int32_t scsi_3btol(const uint8_t *bytes) { uint32_t rc = scsi_3btoul(bytes); if (rc & 0x00800000) rc |= 0xff000000; return (int32_t) rc; } static __inline uint32_t scsi_4btoul(const uint8_t *bytes) { uint32_t rv; rv = (bytes[0] << 24) | (bytes[1] << 16) | (bytes[2] << 8) | bytes[3]; return (rv); } static __inline uint64_t scsi_8btou64(const uint8_t *bytes) { uint64_t rv; rv = (((uint64_t)bytes[0]) << 56) | (((uint64_t)bytes[1]) << 48) | (((uint64_t)bytes[2]) << 40) | (((uint64_t)bytes[3]) << 32) | (((uint64_t)bytes[4]) << 24) | (((uint64_t)bytes[5]) << 16) | (((uint64_t)bytes[6]) << 8) | bytes[7]; return (rv); } /* * Given the pointer to a returned mode sense buffer, return a pointer to * the start of the first mode page. 
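* That is: the reply is the fixed mode header, then blk_desc_len bytes of block descriptors, and the first mode page begins immediately after them, which is why the helpers below simply step past &mode_header[1] plus the descriptor length.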
*/ static __inline void * find_mode_page_6(struct scsi_mode_header_6 *mode_header) { void *page_start; page_start = (void *)((u_int8_t *)&mode_header[1] + mode_header->blk_desc_len); return(page_start); } static __inline void * find_mode_page_10(struct scsi_mode_header_10 *mode_header) { void *page_start; page_start = (void *)((u_int8_t *)&mode_header[1] + scsi_2btoul(mode_header->blk_desc_len)); return(page_start); } __END_DECLS #endif /*_SCSI_SCSI_ALL_H*/ Index: projects/release-pkg/sys/dev/ahci/ahci_pci.c =================================================================== --- projects/release-pkg/sys/dev/ahci/ahci_pci.c (revision 297926) +++ projects/release-pkg/sys/dev/ahci/ahci_pci.c (revision 297927) @@ -1,630 +1,632 @@ /*- * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ahci.h" static int force_ahci = 1; TUNABLE_INT("hw.ahci.force", &force_ahci); static const struct { uint32_t id; uint8_t rev; const char *name; int quirks; } ahci_ids[] = { {0x43801002, 0x00, "AMD SB600", AHCI_Q_NOMSI | AHCI_Q_ATI_PMP_BUG | AHCI_Q_MAXIO_64K}, {0x43901002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43911002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43921002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43931002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43941002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, /* Not sure SB8x0/SB9x0 needs this quirk. 
Be conservative though */ {0x43951002, 0x00, "AMD SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG}, {0x78001022, 0x00, "AMD Hudson-2", 0}, {0x78011022, 0x00, "AMD Hudson-2", 0}, {0x78021022, 0x00, "AMD Hudson-2", 0}, {0x78031022, 0x00, "AMD Hudson-2", 0}, {0x78041022, 0x00, "AMD Hudson-2", 0}, {0x06111b21, 0x00, "ASMedia ASM2106", 0}, {0x06121b21, 0x00, "ASMedia ASM1061", 0}, {0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE}, {0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE}, {0x26818086, 0x00, "Intel ESB2", 0}, {0x26828086, 0x00, "Intel ESB2", 0}, {0x26838086, 0x00, "Intel ESB2", 0}, {0x27c18086, 0x00, "Intel ICH7", 0}, {0x27c38086, 0x00, "Intel ICH7", 0}, {0x27c58086, 0x00, "Intel ICH7M", 0}, {0x27c68086, 0x00, "Intel ICH7M", 0}, {0x28218086, 0x00, "Intel ICH8", 0}, {0x28228086, 0x00, "Intel ICH8", 0}, {0x28248086, 0x00, "Intel ICH8", 0}, {0x28298086, 0x00, "Intel ICH8M", 0}, {0x282a8086, 0x00, "Intel ICH8M", 0}, {0x29228086, 0x00, "Intel ICH9", 0}, {0x29238086, 0x00, "Intel ICH9", 0}, {0x29248086, 0x00, "Intel ICH9", 0}, {0x29258086, 0x00, "Intel ICH9", 0}, {0x29278086, 0x00, "Intel ICH9", 0}, {0x29298086, 0x00, "Intel ICH9M", 0}, {0x292a8086, 0x00, "Intel ICH9M", 0}, {0x292b8086, 0x00, "Intel ICH9M", 0}, {0x292c8086, 0x00, "Intel ICH9M", 0}, {0x292f8086, 0x00, "Intel ICH9M", 0}, {0x294d8086, 0x00, "Intel ICH9", 0}, {0x294e8086, 0x00, "Intel ICH9M", 0}, {0x3a058086, 0x00, "Intel ICH10", 0}, {0x3a228086, 0x00, "Intel ICH10", 0}, {0x3a258086, 0x00, "Intel ICH10", 0}, {0x3b228086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b238086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b258086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b298086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b2c8086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x3b2f8086, 0x00, "Intel 5 Series/3400 Series", 0}, {0x1c028086, 0x00, "Intel Cougar Point", 0}, {0x1c038086, 0x00, "Intel Cougar Point", 0}, {0x1c048086, 0x00, "Intel Cougar Point", 0}, {0x1c058086, 0x00, "Intel Cougar Point", 0}, {0x1d028086, 0x00, "Intel Patsburg", 0}, {0x1d048086, 0x00, "Intel Patsburg", 0}, {0x1d068086, 0x00, "Intel Patsburg", 0}, {0x28268086, 0x00, "Intel Patsburg (RAID)", 0}, {0x1e028086, 0x00, "Intel Panther Point", 0}, {0x1e038086, 0x00, "Intel Panther Point", 0}, {0x1e048086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e058086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e068086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e078086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e0e8086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e0f8086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1f228086, 0x00, "Intel Avoton", 0}, {0x1f238086, 0x00, "Intel Avoton", 0}, {0x1f248086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f258086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f268086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f278086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f2e8086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f2f8086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f328086, 0x00, "Intel Avoton", 0}, {0x1f338086, 0x00, "Intel Avoton", 0}, {0x1f348086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f358086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f368086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f378086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f3e8086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f3f8086, 0x00, "Intel Avoton (RAID)", 0}, {0x23a38086, 0x00, "Intel Coleto Creek", 0}, {0x28238086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x28278086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8c028086, 0x00, "Intel Lynx Point", 0}, {0x8c038086, 0x00, "Intel Lynx Point", 0}, {0x8c048086, 0x00, "Intel Lynx Point (RAID)", 0}, 
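/* Note: each id above and below packs the PCI device ID in the upper 16 bits and the vendor ID in the lower 16 bits, matching the 32-bit PCIR_DEVVENDOR value that pci_get_devid() returns; 0x8c048086, for example, is device 0x8c04 from vendor 0x8086 (Intel). */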
{0x8c058086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c068086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c078086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c0e8086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c0f8086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c828086, 0x00, "Intel Wildcat Point", 0}, {0x8c838086, 0x00, "Intel Wildcat Point", 0}, {0x8c848086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c858086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c868086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c878086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c8e8086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c8f8086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8d028086, 0x00, "Intel Wellsburg", 0}, {0x8d048086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d068086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d628086, 0x00, "Intel Wellsburg", 0}, {0x8d648086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d668086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d6e8086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x9c028086, 0x00, "Intel Lynx Point-LP", 0}, {0x9c038086, 0x00, "Intel Lynx Point-LP", 0}, {0x9c048086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c058086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c068086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c078086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c0e8086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c0f8086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x23238086, 0x00, "Intel DH89xxCC", 0}, {0x2360197b, 0x00, "JMicron JMB360", 0}, {0x2361197b, 0x00, "JMicron JMB361", AHCI_Q_NOFORCE}, {0x2362197b, 0x00, "JMicron JMB362", 0}, {0x2363197b, 0x00, "JMicron JMB363", AHCI_Q_NOFORCE}, {0x2365197b, 0x00, "JMicron JMB365", AHCI_Q_NOFORCE}, {0x2366197b, 0x00, "JMicron JMB366", AHCI_Q_NOFORCE}, {0x2368197b, 0x00, "JMicron JMB368", AHCI_Q_NOFORCE}, {0x611111ab, 0x00, "Marvell 88SE6111", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_1CH | AHCI_Q_EDGEIS}, {0x612111ab, 0x00, "Marvell 88SE6121", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_2CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x614111ab, 0x00, "Marvell 88SE6141", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_4CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x614511ab, 0x00, "Marvell 88SE6145", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_4CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x91201b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS}, {0x91231b4b, 0x11, "Marvell 88SE912x", AHCI_Q_ALTSIG}, {0x91231b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS|AHCI_Q_SATA2}, {0x91251b4b, 0x00, "Marvell 88SE9125", 0}, {0x91281b4b, 0x00, "Marvell 88SE9128", AHCI_Q_ALTSIG}, {0x91301b4b, 0x00, "Marvell 88SE9130", AHCI_Q_ALTSIG}, {0x91721b4b, 0x00, "Marvell 88SE9172", 0}, {0x91821b4b, 0x00, "Marvell 88SE9182", 0}, {0x91831b4b, 0x00, "Marvell 88SS9183", 0}, {0x91a01b4b, 0x00, "Marvell 88SE91Ax", 0}, {0x92151b4b, 0x00, "Marvell 88SE9215", 0}, {0x92201b4b, 0x00, "Marvell 88SE9220", AHCI_Q_ALTSIG}, {0x92301b4b, 0x00, "Marvell 88SE9230", AHCI_Q_ALTSIG}, {0x92351b4b, 0x00, "Marvell 88SE9235", 0}, {0x06201103, 0x00, "HighPoint RocketRAID 620", 0}, {0x06201b4b, 0x00, "HighPoint RocketRAID 620", 0}, {0x06221103, 0x00, "HighPoint RocketRAID 622", 0}, {0x06221b4b, 0x00, "HighPoint RocketRAID 622", 0}, {0x06401103, 0x00, "HighPoint RocketRAID 640", 0}, {0x06401b4b, 0x00, "HighPoint RocketRAID 640", 0}, {0x06441103, 0x00, "HighPoint RocketRAID 644", 0}, {0x06441b4b, 0x00, "HighPoint RocketRAID 644", 0}, {0x06411103, 0x00, "HighPoint RocketRAID 640L", 0}, {0x06421103, 0x00, "HighPoint RocketRAID 642L", 0}, {0x06451103, 0x00, "HighPoint RocketRAID 644L", 0}, 
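/* The NVIDIA MCP entries below all carry AHCI_Q_NOAA; going by the quirk name this appears to disable NCQ auto-activation on those chipsets, but ahci.h holds the authoritative definition. */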
{0x044c10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044d10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044e10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044f10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045c10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045d10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045e10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045f10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x055010de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055110de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055210de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055310de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055410de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055510de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055610de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055710de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055810de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055910de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055A10de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055B10de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x058410de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x07f010de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f110de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f210de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f310de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f410de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f510de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f610de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f710de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f810de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f910de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07fa10de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07fb10de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x0ad010de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad110de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad210de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad310de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad410de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad510de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad610de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad710de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad810de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad910de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ada10de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0adb10de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ab410de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab510de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab610de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab710de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab810de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab910de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0aba10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abb10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abc10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abd10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abe10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abf10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0d8410de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8510de, 0x00, "NVIDIA MCP89", AHCI_Q_NOFORCE|AHCI_Q_NOAA}, {0x0d8610de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8710de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8810de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8910de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8a10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8b10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8c10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8d10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8e10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8f10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x3781105a, 0x00, "Promise TX8660", 0}, {0x33491106, 0x00, "VIA VT8251", AHCI_Q_NOPMP|AHCI_Q_NONCQ}, {0x62871106, 0x00, "VIA VT8251", 
AHCI_Q_NOPMP|AHCI_Q_NONCQ}, {0x11841039, 0x00, "SiS 966", 0}, {0x11851039, 0x00, "SiS 968", 0}, {0x01861039, 0x00, "SiS 968", 0}, {0xa01c177d, 0x00, "ThunderX", AHCI_Q_ABAR0|AHCI_Q_1MSI}, {0x00311c36, 0x00, "Annapurna", AHCI_Q_FORCE_PI|AHCI_Q_RESTORE_CAP|AHCI_Q_NOMSIX}, {0x00000000, 0x00, NULL, 0} }; static int ahci_pci_ctlr_reset(device_t dev) { if (pci_read_config(dev, PCIR_DEVVENDOR, 4) == 0x28298086 && (pci_read_config(dev, 0x92, 1) & 0xfe) == 0x04) pci_write_config(dev, 0x92, 0x01, 1); return ahci_ctlr_reset(dev); } static int ahci_probe(device_t dev) { char buf[64]; int i, valid = 0; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); /* * Ensure it is not a PCI bridge (some vendors use * the same PID and VID in PCI bridge and AHCI cards). */ if (pci_get_class(dev) == PCIC_BRIDGE) return (ENXIO); /* Is this a possible AHCI candidate? */ if (pci_get_class(dev) == PCIC_STORAGE && pci_get_subclass(dev) == PCIS_STORAGE_SATA && pci_get_progif(dev) == PCIP_STORAGE_SATA_AHCI_1_0) valid = 1; else if (pci_get_class(dev) == PCIC_STORAGE && pci_get_subclass(dev) == PCIS_STORAGE_RAID) valid = 2; /* Is this a known AHCI chip? */ for (i = 0; ahci_ids[i].id != 0; i++) { if (ahci_ids[i].id == devid && ahci_ids[i].rev <= revid && (valid || (force_ahci == 1 && !(ahci_ids[i].quirks & AHCI_Q_NOFORCE)))) { /* Do not attach JMicrons with single PCI function. */ if (pci_get_vendor(dev) == 0x197b && (pci_read_config(dev, 0xdf, 1) & 0x40) == 0) return (ENXIO); snprintf(buf, sizeof(buf), "%s AHCI SATA controller", ahci_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } if (valid != 1) return (ENXIO); device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_DEFAULT); } static int ahci_ata_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); if ((intptr_t)device_get_ivars(dev) >= 0) return (ENXIO); /* Is this a known AHCI chip? */ for (i = 0; ahci_ids[i].id != 0; i++) { if (ahci_ids[i].id == devid && ahci_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s AHCI SATA controller", ahci_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_DEFAULT); } static int ahci_pci_read_msix_bars(device_t dev, uint8_t *table_bar, uint8_t *pba_bar) { int cap_offset = 0, ret; uint32_t val; if ((table_bar == NULL) || (pba_bar == NULL)) return (EINVAL); ret = pci_find_cap(dev, PCIY_MSIX, &cap_offset); if (ret != 0) return (EINVAL); val = pci_read_config(dev, cap_offset + PCIR_MSIX_TABLE, 4); *table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); val = pci_read_config(dev, cap_offset + PCIR_MSIX_PBA, 4); *pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); return (0); } static int ahci_pci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); int msi_count, msix_count; uint8_t table_bar = 0, pba_bar = 0; msi_count = pci_msi_count(dev); msix_count = pci_msix_count(dev); i = 0; while (ahci_ids[i].id != 0 && (ahci_ids[i].id != devid || ahci_ids[i].rev > revid)) i++; ctlr->quirks = ahci_ids[i].quirks; /* Limit speed for my onboard JMicron external port. 
* It is not eSATA really, limit to SATA 1 */ if (pci_get_devid(dev) == 0x2363197b && pci_get_subvendor(dev) == 0x1043 && pci_get_subdevice(dev) == 0x81e4) ctlr->quirks |= AHCI_Q_SATA1_UNIT0; + resource_int_value(device_get_name(dev), device_get_unit(dev), + "quirks", &ctlr->quirks); ctlr->vendorid = pci_get_vendor(dev); ctlr->deviceid = pci_get_device(dev); ctlr->subvendorid = pci_get_subvendor(dev); ctlr->subdeviceid = pci_get_subdevice(dev); /* Default AHCI Base Address is BAR(5), Cavium uses BAR(0) */ if (ctlr->quirks & AHCI_Q_ABAR0) ctlr->r_rid = PCIR_BAR(0); else ctlr->r_rid = PCIR_BAR(5); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; if (ctlr->quirks & AHCI_Q_NOMSIX) msix_count = 0; /* Read MSI-x BAR IDs if supported */ if (msix_count > 0) { error = ahci_pci_read_msix_bars(dev, &table_bar, &pba_bar); if (error == 0) { ctlr->r_msix_tab_rid = table_bar; ctlr->r_msix_pba_rid = pba_bar; } else { /* Failed to read BARs, disable MSI-x */ msix_count = 0; } } /* Allocate resources for MSI-x table and PBA */ if (msix_count > 0) { /* * Allocate new MSI-x table only if not * allocated before. */ ctlr->r_msix_table = NULL; if (ctlr->r_msix_tab_rid != ctlr->r_rid) { /* Separate BAR for MSI-x */ ctlr->r_msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_msix_tab_rid, RF_ACTIVE); if (ctlr->r_msix_table == NULL) { ahci_free_mem(dev); return (ENXIO); } } /* * Allocate new PBA table only if not * allocated before. */ ctlr->r_msix_pba = NULL; if ((ctlr->r_msix_pba_rid != ctlr->r_msix_tab_rid) && (ctlr->r_msix_pba_rid != ctlr->r_rid)) { /* Separate BAR for PBA */ ctlr->r_msix_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_msix_pba_rid, RF_ACTIVE); if (ctlr->r_msix_pba == NULL) { ahci_free_mem(dev); return (ENXIO); } } } pci_enable_busmaster(dev); /* Reset controller */ if ((error = ahci_pci_ctlr_reset(dev)) != 0) { ahci_free_mem(dev); return (error); } /* Setup interrupts. */ /* Setup MSI register parameters */ /* Process hints. */ if (ctlr->quirks & AHCI_Q_NOMSI) ctlr->msi = 0; else if (ctlr->quirks & AHCI_Q_1MSI) ctlr->msi = 1; else ctlr->msi = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &ctlr->msi); ctlr->numirqs = 1; if (msi_count == 0 && msix_count == 0) ctlr->msi = 0; if (ctlr->msi < 0) ctlr->msi = 0; else if (ctlr->msi == 1) { msi_count = min(1, msi_count); msix_count = min(1, msix_count); } else if (ctlr->msi > 1) ctlr->msi = 2; /* Allocate MSI/MSI-x if needed/present. */ if (ctlr->msi > 0) { error = ENXIO; /* Try to allocate MSI-x first */ if (msix_count > 0) { error = pci_alloc_msix(dev, &msix_count); if (error == 0) ctlr->numirqs = msix_count; } /* * Try to allocate MSI if msi_count is greater than 0 * and if MSI-x allocation failed. 
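* The resulting fallback order is MSI-X first, then MSI, then legacy INTx if both allocations fail.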
*/ if ((error != 0) && (msi_count > 0)) { error = pci_alloc_msi(dev, &msi_count); if (error == 0) ctlr->numirqs = msi_count; } /* Both MSI and MSI-x allocations failed */ if (error != 0) { ctlr->msi = 0; device_printf(dev, "Failed to allocate MSI/MSI-x, " "falling back to INTx\n"); } } error = ahci_attach(dev); if (error != 0) { if (ctlr->msi > 0) pci_release_msi(dev); ahci_free_mem(dev); } return error; } static int ahci_pci_detach(device_t dev) { ahci_detach(dev); pci_release_msi(dev); return (0); } static int ahci_pci_suspend(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Disable interrupts, so the state change(s) don't trigger */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) & (~AHCI_GHC_IE)); return 0; } static int ahci_pci_resume(device_t dev) { int res; if ((res = ahci_pci_ctlr_reset(dev)) != 0) return (res); ahci_ctlr_setup(dev); return (bus_generic_resume(dev)); } devclass_t ahci_devclass; static device_method_t ahci_methods[] = { DEVMETHOD(device_probe, ahci_probe), DEVMETHOD(device_attach, ahci_pci_attach), DEVMETHOD(device_detach, ahci_pci_detach), DEVMETHOD(device_suspend, ahci_pci_suspend), DEVMETHOD(device_resume, ahci_pci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), DEVMETHOD(bus_child_location_str, ahci_child_location_str), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static driver_t ahci_driver = { "ahci", ahci_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci, pci, ahci_driver, ahci_devclass, NULL, NULL); static device_method_t ahci_ata_methods[] = { DEVMETHOD(device_probe, ahci_ata_probe), DEVMETHOD(device_attach, ahci_pci_attach), DEVMETHOD(device_detach, ahci_pci_detach), DEVMETHOD(device_suspend, ahci_pci_suspend), DEVMETHOD(device_resume, ahci_pci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), DEVMETHOD(bus_child_location_str, ahci_child_location_str), DEVMETHOD_END }; static driver_t ahci_ata_driver = { "ahci", ahci_ata_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci, atapci, ahci_ata_driver, ahci_devclass, NULL, NULL); Index: projects/release-pkg/sys/dev/bxe/bxe.c =================================================================== --- projects/release-pkg/sys/dev/bxe/bxe.c (revision 297926) +++ projects/release-pkg/sys/dev/bxe/bxe.c (revision 297927) @@ -1,19138 +1,19051 @@ /*- * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define BXE_DRIVER_VERSION "1.78.81" #include "bxe.h" #include "ecore_sp.h" #include "ecore_init.h" #include "ecore_init_ops.h" #include "57710_int_offsets.h" #include "57711_int_offsets.h" #include "57712_int_offsets.h" /* * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these * explicitly here for older kernels that don't include this changeset. */ #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #define sysctl_handle_64 sysctl_handle_quad #endif /* * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these * here as zero(0) for older kernels that don't include this changeset * thereby masking the functionality. */ #ifndef CSUM_TCP_IPV6 #define CSUM_TCP_IPV6 0 #define CSUM_UDP_IPV6 0 #endif /* * pci_find_cap was added in r219865. Re-define this at pci_find_extcap * for older kernels that don't include this changeset. */ #if __FreeBSD_version < 900035 #define pci_find_cap pci_find_extcap #endif #define BXE_DEF_SB_ATT_IDX 0x0001 #define BXE_DEF_SB_IDX 0x0002 /* * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per * function HW initialization. */ #define FLR_WAIT_USEC 10000 /* 10 msecs */ #define FLR_WAIT_INTERVAL 50 /* usecs */ #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ struct pbf_pN_buf_regs { int pN; uint32_t init_crd; uint32_t crd; uint32_t crd_freed; }; struct pbf_pN_cmd_regs { int pN; uint32_t lines_occup; uint32_t lines_freed; }; /* * PCI Device ID Table used by bxe_probe(). 
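* Each entry matches on vendor ID and chip number; the PCI_ANY_ID wildcards accept any subvendor/subdevice ID.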
*/ #define BXE_DEVDESC_MAX 64 static struct bxe_device_type bxe_devs[] = { { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57710 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711E 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 MF 10GbE" }, { 0, 0, 0, 0, NULL } }; MALLOC_DECLARE(M_BXE_ILT); MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer"); /* * FreeBSD device entry points. */ static int bxe_probe(device_t); static int bxe_attach(device_t); static int bxe_detach(device_t); static int bxe_shutdown(device_t); /* * FreeBSD KLD module/device interface event handler method. */ static device_method_t bxe_methods[] = { /* Device interface (device_if.h) */ DEVMETHOD(device_probe, bxe_probe), DEVMETHOD(device_attach, bxe_attach), DEVMETHOD(device_detach, bxe_detach), DEVMETHOD(device_shutdown, bxe_shutdown), /* Bus interface (bus_if.h) */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), KOBJMETHOD_END }; /* * FreeBSD KLD Module data declaration */ static driver_t bxe_driver = { "bxe", /* module name */ bxe_methods, /* event handler */ sizeof(struct bxe_softc) /* extra data */ }; /* * FreeBSD dev class is needed to manage dev instances and * to associate with a bus type */ static devclass_t bxe_devclass; MODULE_DEPEND(bxe, pci, 1, 1, 1); MODULE_DEPEND(bxe, ether, 1, 1, 1); DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0); /* resources needed for unloading a previously loaded device */ #define BXE_PREV_WAIT_NEEDED 1 struct mtx bxe_prev_mtx; MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF); struct bxe_prev_list_node { LIST_ENTRY(bxe_prev_list_node) node; uint8_t bus; uint8_t slot; uint8_t path; uint8_t aer; /* XXX automatic error recovery */ uint8_t undi; }; static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list); static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ /* Tunable device values... 
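* The CTLFLAG_RDTUN/CTLFLAG_TUN sysctls below double as loader tunables, so a setting such as hw.bxe.interrupt_mode=1 can be placed in loader.conf.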
*/ SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters"); /* Debug */ unsigned long bxe_debug = 0; SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN, &bxe_debug, 0, "Debug logging mode"); /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ static int bxe_interrupt_mode = INTR_MODE_MSIX; SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN, &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode"); /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */ static int bxe_queue_count = 4; SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count, 0, "Multi-Queue queue count"); /* max number of buffers per queue (default RX_BD_USABLE) */ static int bxe_max_rx_bufs = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN, &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue"); /* Host interrupt coalescing RX tick timer (usecs) */ static int bxe_hc_rx_ticks = 25; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN, &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks"); /* Host interrupt coalescing TX tick timer (usecs) */ static int bxe_hc_tx_ticks = 50; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN, &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks"); /* Maximum number of Rx packets to process at a time */ static int bxe_rx_budget = 0xffffffff; SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN, &bxe_rx_budget, 0, "Rx processing budget"); /* Maximum LRO aggregation size */ static int bxe_max_aggregation_size = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN, &bxe_max_aggregation_size, 0, "max aggregation size"); /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */ static int bxe_mrrs = -1; SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, &bxe_mrrs, 0, "PCIe maximum read request size"); /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ static int bxe_autogreeen = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, &bxe_autogreeen, 0, "AutoGrEEEn support"); /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ static int bxe_udp_rss = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN, &bxe_udp_rss, 0, "UDP RSS support"); #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */ #define STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_stats, stat_name) / 4) #define Q_STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_q_stats, stat_name) / 4) static const struct { uint32_t offset; uint32_t size; uint32_t flags; #define STATS_FLAGS_PORT 1 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */ #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) char string[STAT_NAME_LEN]; } bxe_eth_stats_arr[] = { { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" }, { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, 
"rx_oversize_packets" }, { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" }, { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" }, { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" }, { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" }, { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" }, { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" }, { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes"}, { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi"}, { STATS_OFFSET32(rx_calls), 4, 
STATS_FLAGS_FUNC, "rx_calls"}, { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts"}, { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"}, { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_bxe_service_rxsgl), 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"}, { STATS_OFFSET32(rx_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors"}, { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"}, { STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"}, { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, { STATS_OFFSET32(rx_budget_reached), 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, { STATS_OFFSET32(tx_pkts), 4, STATS_FLAGS_FUNC, "tx_pkts"}, { STATS_OFFSET32(tx_soft_errors), 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, { STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, { STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, { STATS_OFFSET32(tx_ofld_frames_lso), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, { STATS_OFFSET32(tx_encap_failures), 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, { STATS_OFFSET32(tx_hw_queue_full), 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, { STATS_OFFSET32(tx_hw_max_queue_depth), 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, { STATS_OFFSET32(tx_dma_mapping_failure), 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, { STATS_OFFSET32(tx_max_drbr_queue_depth), 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, { STATS_OFFSET32(tx_window_violation_std), 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, { STATS_OFFSET32(tx_frames_deferred), 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, { STATS_OFFSET32(tx_queue_xoff), 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, { STATS_OFFSET32(mbuf_defrag_attempts), 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, { STATS_OFFSET32(mbuf_defrag_failures), 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, { STATS_OFFSET32(mbuf_alloc_tx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, { STATS_OFFSET32(mbuf_alloc_rx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, { STATS_OFFSET32(mbuf_alloc_sge), 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, { STATS_OFFSET32(mbuf_alloc_tpa), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}, { STATS_OFFSET32(tx_queue_full_return), 4, STATS_FLAGS_FUNC, "tx_queue_full_return"} }; static const struct { uint32_t offset; uint32_t size; char string[STAT_NAME_LEN]; } bxe_eth_q_stats_arr[] = { { 
Q_STATS_OFFSET32(total_bytes_received_hi), 8, "rx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "rx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 8, "rx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 8, "rx_bcast_packets" }, { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "rx_discards" }, { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "tx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "tx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "tx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, "tx_bcast_packets" }, { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 8, "tpa_aggregations" }, { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, "tpa_aggregated_frames"}, { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "tpa_bytes"}, { Q_STATS_OFFSET32(rx_calls), 4, "rx_calls"}, { Q_STATS_OFFSET32(rx_pkts), 4, "rx_pkts"}, { Q_STATS_OFFSET32(rx_tpa_pkts), 4, "rx_tpa_pkts"}, { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, "rx_erroneous_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 4, "rx_bxe_service_rxsgl"}, { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 4, "rx_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_soft_errors), 4, "rx_soft_errors"}, { Q_STATS_OFFSET32(rx_hw_csum_errors), 4, "rx_hw_csum_errors"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, "rx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, "rx_ofld_frames_csum_tcp_udp"}, { Q_STATS_OFFSET32(rx_budget_reached), 4, "rx_budget_reached"}, { Q_STATS_OFFSET32(tx_pkts), 4, "tx_pkts"}, { Q_STATS_OFFSET32(tx_soft_errors), 4, "tx_soft_errors"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, "tx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, "tx_ofld_frames_csum_tcp"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, "tx_ofld_frames_csum_udp"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso), 4, "tx_ofld_frames_lso"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, "tx_ofld_frames_lso_hdr_splits"}, { Q_STATS_OFFSET32(tx_encap_failures), 4, "tx_encap_failures"}, { Q_STATS_OFFSET32(tx_hw_queue_full), 4, "tx_hw_queue_full"}, { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 4, "tx_hw_max_queue_depth"}, { Q_STATS_OFFSET32(tx_dma_mapping_failure), 4, "tx_dma_mapping_failure"}, { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 4, "tx_max_drbr_queue_depth"}, { Q_STATS_OFFSET32(tx_window_violation_std), 4, "tx_window_violation_std"}, { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso"}, { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf"}, { Q_STATS_OFFSET32(tx_frames_deferred), 4, "tx_frames_deferred"}, { Q_STATS_OFFSET32(tx_queue_xoff), 4, "tx_queue_xoff"}, { Q_STATS_OFFSET32(mbuf_defrag_attempts), 4, "mbuf_defrag_attempts"}, { Q_STATS_OFFSET32(mbuf_defrag_failures), 4, "mbuf_defrag_failures"}, { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, "mbuf_rx_bd_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, "mbuf_rx_bd_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, "mbuf_rx_tpa_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, "mbuf_rx_tpa_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, "mbuf_rx_sge_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, "mbuf_rx_sge_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx"}, { Q_STATS_OFFSET32(mbuf_alloc_rx), 4, "mbuf_alloc_rx"}, { Q_STATS_OFFSET32(mbuf_alloc_sge), 4, "mbuf_alloc_sge"}, { 
Q_STATS_OFFSET32(mbuf_alloc_tpa), 4, "mbuf_alloc_tpa"}, { Q_STATS_OFFSET32(tx_queue_full_return), 4, "tx_queue_full_return"} }; #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type); static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port); static void bxe_set_reset_global(struct bxe_softc *sc); static void bxe_set_reset_in_progress(struct bxe_softc *sc); static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine); static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print); static void bxe_int_disable(struct bxe_softc *sc); static int bxe_release_leader_lock(struct bxe_softc *sc); static void bxe_pf_disable(struct bxe_softc *sc); static void bxe_free_fp_buffers(struct bxe_softc *sc); static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod); static void bxe_link_report_locked(struct bxe_softc *sc); static void bxe_link_report(struct bxe_softc *sc); static void bxe_link_status_update(struct bxe_softc *sc); static void bxe_periodic_callout_func(void *xsc); static void bxe_periodic_start(struct bxe_softc *sc); static void bxe_periodic_stop(struct bxe_softc *sc); static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index); static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue); static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index); static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp); static void bxe_task_fp(struct bxe_fastpath *fp); static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents); static int bxe_alloc_mem(struct bxe_softc *sc); static void bxe_free_mem(struct bxe_softc *sc); static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); static void bxe_free_fw_stats_mem(struct bxe_softc *sc); static int bxe_interrupt_attach(struct bxe_softc *sc); static void bxe_interrupt_detach(struct bxe_softc *sc); static void bxe_set_rx_mode(struct bxe_softc *sc); static int bxe_init_locked(struct bxe_softc *sc); static int bxe_stop_locked(struct bxe_softc *sc); static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode); static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link); static void bxe_handle_sp_tq(void *context, int pending); static void bxe_handle_fp_tq(void *context, int pending); static int bxe_add_cdev(struct bxe_softc *sc); static void bxe_del_cdev(struct bxe_softc *sc); +static int bxe_grc_dump(struct bxe_softc *sc); static int bxe_alloc_buf_rings(struct bxe_softc *sc); static void bxe_free_buf_rings(struct bxe_softc *sc); /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, uint32_t crc32_seed, uint8_t complement) { uint32_t byte = 0; uint32_t bit = 0; uint8_t msb = 0; uint32_t temp = 0; uint32_t shft = 0; uint8_t current_byte = 0; uint32_t crc32_result = crc32_seed; const uint32_t CRC32_POLY = 0x1edc6f41; if ((crc32_packet == NULL) || (crc32_length == 0) || ((crc32_length % 8) != 0)) { return (crc32_result); } for (byte = 0; byte < crc32_length; byte = byte + 1) { current_byte = crc32_packet[byte]; for (bit 
= 0; bit < 8; bit = bit + 1) { /* msb = crc32_result[31]; */ msb = (uint8_t)(crc32_result >> 31); crc32_result = crc32_result << 1; /* if (msb != current_byte[bit]) */ if (msb != (0x1 & (current_byte >> bit))) { crc32_result = crc32_result ^ CRC32_POLY; /* crc32_result[0] = 1 */ crc32_result |= 1; } } } /* Last step is to: * 1. "mirror" every bit * 2. swap the 4 bytes * 3. complement each bit */ /* Mirror */ temp = crc32_result; shft = sizeof(crc32_result) * 8 - 1; for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) { temp <<= 1; temp |= crc32_result & 1; shft--; } /* temp[31-bit] = crc32_result[bit] */ temp <<= shft; /* Swap */ /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */ { uint32_t t0, t1, t2, t3; t0 = (0x000000ff & (temp >> 24)); t1 = (0x0000ff00 & (temp >> 8)); t2 = (0x00ff0000 & (temp << 8)); t3 = (0xff000000 & (temp << 24)); crc32_result = t0 | t1 | t2 | t3; } /* Complement */ if (complement) { crc32_result = ~crc32_result; } return (crc32_result); } int bxe_test_bit(int nr, volatile unsigned long *addr) { return ((atomic_load_acq_long(addr) & (1 << nr)) != 0); } void bxe_set_bit(unsigned int nr, volatile unsigned long *addr) { atomic_set_acq_long(addr, (1 << nr)); } void bxe_clear_bit(int nr, volatile unsigned long *addr) { atomic_clear_acq_long(addr, (1 << nr)); } int bxe_test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_cmpxchg(volatile int *addr, int old, int new) { int x; do { x = *addr; } while (atomic_cmpset_acq_int(addr, old, new) == 0); return (x); } /* * Get DMA memory from the OS. * * Validates that the OS has provided DMA buffers in response to a * bus_dmamap_load call and saves the physical address of those buffers. * When the callback is used the OS will return 0 for the mapping function * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any * failures back to the caller. * * Returns: * Nothing. */ static void bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bxe_dma *dma = arg; if (error) { dma->paddr = 0; dma->nseg = 0; BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error); } else { dma->paddr = segs->ds_addr; dma->nseg = nseg; } } /* * Allocate a block of memory and map it for DMA. No partial completions * allowed; release any resources acquired if we can't acquire all * resources. 
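* Concretely, tag creation, buffer allocation and map load are attempted in sequence below, and a failure at any step tears down whatever the earlier steps set up before returning.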
* * Returns: * 0 = Success, !0 = Failure */ int bxe_dma_alloc(struct bxe_softc *sc, bus_size_t size, struct bxe_dma *dma, const char *msg) { int rc; if (dma->size > 0) { BLOGE(sc, "dma block '%s' already has size %lu\n", msg, (unsigned long)dma->size); return (1); } memset(dma, 0, sizeof(*dma)); /* sanity */ dma->sc = sc; dma->size = size; snprintf(dma->msg, sizeof(dma->msg), "%s", msg); rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ BCM_PAGE_SIZE, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ size, /* max map size */ 1, /* num discontinuous */ size, /* max seg size */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &dma->tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &dma->map); if (rc != 0) { BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, bxe_dma_map_addr, /* BLOGD in here */ dma, BUS_DMA_NOWAIT); if (rc != 0) { BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } return (0); } void bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma) { if (dma->size > 0) { DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL")); bus_dmamap_sync(dma->tag, dma->map, (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); } memset(dma, 0, sizeof(*dma)); } /* * These indirect read and write routines are only used during init. * The locking is handled by the MCP. 
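* They tunnel GRC register accesses through PCI config space: the target address is written to PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and the address register is cleared again afterwards.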
*/ void bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); } uint32_t bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t addr) { uint32_t val; pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); return (val); } static int bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; int cnt; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is not already taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } /* try every 5ms for 5 seconds */ for (cnt = 0; cnt < 1000; cnt++) { REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (0); } DELAY(5000); } BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n", resource, resource_bit); return (-1); } static int bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is currently taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (!(lock_status & resource_bit)) { BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } REG_WR(sc, hw_lock_control_reg, resource_bit); return (0); } static void bxe_acquire_phy_lock(struct bxe_softc *sc) { BXE_PHY_LOCK(sc); bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); } static void bxe_release_phy_lock(struct bxe_softc *sc) { bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); BXE_PHY_UNLOCK(sc); } /* * Per pf misc lock must be acquired before the per port mcp lock. Otherwise, * had we done things the other way around, if two pfs from the same port * would attempt to access nvram at the same time, we could run into a * scenario such as: * pf A takes the port lock. * pf B succeeds in taking the same lock since they are from the same port. * pf A takes the per pf misc lock. Performs eeprom access. * pf A finishes. Unlocks the per pf misc lock. * Pf B takes the lock and proceeds to perform its own access. * pf A unlocks the per port lock, while pf B is still working (!). 
/* * Per pf misc lock must be acquired before the per port mcp lock. Otherwise, * had we done things the other way around, if two pfs from the same port * were to attempt to access nvram at the same time, we could run into a * scenario such as: * pf A takes the port lock. * pf B succeeds in taking the same lock since they are from the same port. * pf A takes the per pf misc lock. Performs eeprom access. * pf A finishes. Unlocks the per pf misc lock. * pf B takes the lock and proceeds to perform its own access. * pf A unlocks the per port lock, while pf B is still working (!). * mcp takes the per port lock and corrupts pf B's access (and/or has its own * access corrupted by pf B). */ static int bxe_acquire_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* acquire HW lock: protect against other PFs in PF Direct Assignment */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* request access to nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { break; } DELAY(5); } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { BLOGE(sc, "Cannot get access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } return (0); } static int bxe_release_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* relinquish nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { break; } DELAY(5); } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { BLOGE(sc, "Cannot free access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } /* release HW lock: protect against other PFs in PF Direct Assignment */ bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); return (0); } static void bxe_enable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* enable both bits, even on read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN)); } static void bxe_disable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* disable both bits, even after read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN))); } static int bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val, uint32_t cmd_flags) { int count, i, rc; uint32_t val; /* build the command word */ cmd_flags |= MCPR_NVM_COMMAND_DOIT; /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* address of the NVRAM to read from */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue a read command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ *ret_val = 0; rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { val = REG_RD(sc, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order, but ethtool sees it as an array * of bytes; converting to big-endian does the work */ *ret_val = htobe32(val); rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram read timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } static int bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; int rc;
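/*
 * The loop below reads the NVRAM a dword at a time: the first dword of a
 * burst carries MCPR_NVM_COMMAND_FIRST, the final one MCPR_NVM_COMMAND_LAST,
 * and the dwords in between carry neither. A minimal standalone model of
 * that flag schedule (illustrative only; the flag encodings are assumed,
 * and the block is kept under #if 0 so it is not compiled):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define CMD_FIRST 0x1 /* assumed encoding, illustration only */
#define CMD_LAST  0x2 /* assumed encoding, illustration only */

static void
show_flag_schedule(int buf_size)
{
    uint32_t cmd_flags = CMD_FIRST;
    int offset = 0;

    while (buf_size > 4) {     /* middle dwords run with flags cleared */
        printf("offset %d flags 0x%x\n", offset, cmd_flags);
        offset += 4;
        buf_size -= 4;
        cmd_flags = 0;
    }
    cmd_flags |= CMD_LAST;     /* the final dword always carries LAST */
    printf("offset %d flags 0x%x\n", offset, cmd_flags);
}
#endif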
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); /* read the first word(s) */ cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((buf_size > sizeof(uint32_t)) && (rc == 0)) { rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); /* advance to the next dword */ offset += sizeof(uint32_t); ret_buf += sizeof(uint32_t); buf_size -= sizeof(uint32_t); cmd_flags = 0; } if (rc == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val, uint32_t cmd_flags) { int count, i, rc; /* build the command word */ cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR); /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* write the data */ REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val); /* address of the NVRAM to write to */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue the write command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram write timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) static int bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t align_offset; uint32_t val; int rc; if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); align_offset = (offset & ~0x03); rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); if (rc == 0) { val &= ~(0xff << BYTE_OFFSET(offset)); val |= (*data_buf << BYTE_OFFSET(offset)); /* nvram data is returned as an array of bytes * convert it back to cpu order */ val = be32toh(val); rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; uint32_t written_so_far; int rc; if (buf_size == 1) { return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); } if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { BLOGE(sc, "Invalid 
parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if (buf_size == 0) { return (0); /* nothing to do */ } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); written_so_far = 0; cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((written_so_far < buf_size) && (rc == 0)) { if (written_so_far == (buf_size - sizeof(uint32_t))) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if ((offset % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_FIRST; } memcpy(&val, data_buf, 4); rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); /* advance to the next dword */ offset += sizeof(uint32_t); data_buf += sizeof(uint32_t); written_so_far += sizeof(uint32_t); cmd_flags = 0; } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } /* copy command into DMAE command memory and set DMAE command Go */ void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx) { uint32_t cmd_offset; int i; cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx)); for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) { REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); } REG_WR(sc, dmae_reg_go_c[idx], 1); } uint32_t bxe_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) { return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) | DMAE_CMD_C_TYPE_ENABLE)); } uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode) { return (opcode & ~DMAE_CMD_SRC_RESET); } uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type, uint8_t dst_type, uint8_t with_comp, uint8_t comp_type) { uint32_t opcode = 0; opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) | (dst_type << DMAE_CMD_DST_SHIFT)); opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) | (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT)); opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT); #ifdef __BIG_ENDIAN opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; #else opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; #endif if (with_comp) { opcode = bxe_dmae_opcode_add_comp(opcode, comp_type); } return (opcode); } static void bxe_prep_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae, uint8_t src_type, uint8_t dst_type) { memset(dmae, 0, sizeof(struct dmae_cmd)); /* set the opcode */ dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, TRUE, DMAE_COMP_PCI); /* fill in the completion parameters */ dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_val = DMAE_COMP_VAL; } /* issue a DMAE command over the init channel and wait for completion */ static int bxe_issue_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae) { uint32_t *wb_comp = BXE_SP(sc, wb_comp); int timeout = CHIP_REV_IS_SLOW(sc) ? 
400000 : 4000; BXE_DMAE_LOCK(sc); /* reset completion */ *wb_comp = 0; /* post the command on the channel used for initializations */ bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); /* wait for completion */ DELAY(5); while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!timeout || (sc->recovery_state != BXE_RECOVERY_DONE && sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_TIMEOUT); } timeout--; DELAY(50); } if (*wb_comp & DMAE_PCI_ERR_FLAG) { BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_PCI_ERROR); } BXE_DMAE_UNLOCK(sc); return (0); } void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32) { struct dmae_cmd dmae; uint32_t *data; int i, rc; DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); if (!sc->dmae_ready) { data = BXE_SP(sc, wb_data[0]); for (i = 0; i < len32; i++) { data[i] = (CHIP_IS_E1(sc)) ? bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : REG_RD(sc, (src_addr + (i * 4))); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); /* fill in addresses and len */ dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ dmae.src_addr_hi = 0; dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr, uint32_t len32) { struct dmae_cmd dmae; int rc; if (!sc->dmae_ready) { DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); if (CHIP_IS_E1(sc)) { ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } else { ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); /* fill in addresses and len */ dmae.src_addr_lo = U64_LO(dma_addr); dmae.src_addr_hi = U64_HI(dma_addr); dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ dmae.dst_addr_hi = 0; dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); int offset = 0; while (len > dmae_wr_max) { bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ dmae_wr_max); offset += (dmae_wr_max * 4); len -= dmae_wr_max; } bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ len); } void bxe_set_ctx_validation(struct bxe_softc *sc, struct eth_context *cxt, uint32_t cid) { /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); /* xcontext validation */ cxt->xstorm_ag_context.cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } static void bxe_storm_memset_hc_timeout(struct bxe_softc *sc, uint8_t port, uint8_t fw_sb_id, uint8_t sb_index, uint8_t ticks) { uint32_t addr = 
(BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); REG_WR8(sc, addr, ticks); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d ticks %d\n", port, fw_sb_id, sb_index, ticks); } static void bxe_storm_memset_hc_disable(struct bxe_softc *sc, uint8_t port, uint16_t fw_sb_id, uint8_t sb_index, uint8_t disable) { uint32_t enable_flag = (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); uint8_t flags; /* clear and set */ flags = REG_RD8(sc, addr); flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; REG_WR8(sc, addr, flags); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); } void bxe_update_coalesce_sb_index(struct bxe_softc *sc, uint8_t fw_sb_id, uint8_t sb_index, uint8_t disable, uint16_t usec) { int port = SC_PORT(sc); uint8_t ticks = (usec / 4); /* XXX ??? */ bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); disable = (disable) ? 1 : ((usec) ? 0 : 1); bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); } void elink_cb_udelay(struct bxe_softc *sc, uint32_t usecs) { DELAY(usecs); } uint32_t elink_cb_reg_read(struct bxe_softc *sc, uint32_t reg_addr) { return (REG_RD(sc, reg_addr)); } void elink_cb_reg_write(struct bxe_softc *sc, uint32_t reg_addr, uint32_t val) { REG_WR(sc, reg_addr, val); } void elink_cb_reg_wb_write(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_WR_DMAE(sc, offset, wb_write, len); } void elink_cb_reg_wb_read(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_RD_DMAE(sc, offset, wb_write, len); } uint8_t elink_cb_path_id(struct bxe_softc *sc) { return (SC_PATH(sc)); } void elink_cb_event_log(struct bxe_softc *sc, const elink_log_id_t elink_log_id, ...) { /* XXX */ BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); } static int bxe_set_spio(struct bxe_softc *sc, int spio, uint32_t mode) { uint32_t spio_reg; /* Only 2 SPIOs are configurable */ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); switch (mode) { case MISC_SPIO_OUTPUT_LOW: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); /* clear FLOAT and set CLR */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_CLR_POS); break; case MISC_SPIO_OUTPUT_HIGH: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); /* clear FLOAT and set SET */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_SET_POS); break; case MISC_SPIO_INPUT_HI_Z: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); /* set FLOAT */ spio_reg |= (spio << MISC_SPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_SPIO, spio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); return (0); } static int bxe_gpio_read(struct bxe_softc *sc, int gpio_num, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, gpio_mask); return (-1); } /* read GPIO value */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); /* get the requested pin value */ return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; } static int bxe_gpio_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint32_t mode) { uint32_t gpio_reg; /* any port swapping should be handled by caller */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); /* set CLR */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); /* set SET */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); /* set FLOAT */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" " gpio_reg 0x%x\n", pins, mode, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (-1); } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_int_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, 
NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO int */ gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: BLOGD(sc, DBG_PHY, "Clear GPIO INT %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: BLOGD(sc, DBG_PHY, "Set GPIO INT %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } uint32_t elink_cb_gpio_read(struct bxe_softc *sc, uint16_t gpio_num, uint8_t port) { return (bxe_gpio_read(sc, gpio_num, port)); } uint8_t elink_cb_gpio_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_write(sc, gpio_num, mode, port)); } uint8_t elink_cb_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint8_t mode) /* 0=low 1=high */ { return (bxe_gpio_mult_write(sc, pins, mode)); } uint8_t elink_cb_gpio_int_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_int_write(sc, gpio_num, mode, port)); } void elink_cb_notify_link_changed(struct bxe_softc *sc) { REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + (SC_FUNC(sc) * sizeof(uint32_t))), 1); } /* send the MCP a request, block until there is a reply */ uint32_t elink_cb_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t seq; uint32_t rc = 0; uint32_t cnt = 1; uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; BXE_FWMB_LOCK(sc); seq = ++sc->fw_seq; SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); BLOGD(sc, DBG_PHY, "wrote command 0x%08x to FW MB param 0x%08x\n", (command | seq), param); /* Let the FW do its magic. Give it up to 5 seconds... */ do { DELAY(delay * 1000); rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); BLOGD(sc, DBG_PHY, "[after %d ms] read 0x%x seq 0x%x from FW MB\n", cnt*delay, rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { rc &= FW_MSG_CODE_MASK; } else { /* Ruh-roh! */ BLOGE(sc, "FW failed to respond!\n"); // XXX bxe_fw_dump(sc); rc = 0; } BXE_FWMB_UNLOCK(sc); return (rc); }
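/*
 * A standalone model of the mailbox handshake in elink_cb_fw_command()
 * above (illustrative only): the low bits of the mailbox header carry a
 * sequence number and the high bits the command/response code, so a reply
 * is "ours" only when the echoed sequence number matches. The mask values
 * below are assumed for illustration; the block is not compiled.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define SEQ_MASK  0x0000ffff /* assumed, illustration only */
#define CODE_MASK 0xffff0000 /* assumed, illustration only */

int
main(void)
{
    uint32_t seq = 0x12;
    uint32_t fw_header = 0x34560000 | seq;  /* simulated FW reply */

    /* match the echoed sequence number, then strip it to get the code */
    assert((fw_header & SEQ_MASK) == seq);
    assert((fw_header & CODE_MASK) == 0x34560000);
    return (0);
}
#endif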
static uint32_t bxe_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { return (elink_cb_fw_command(sc, command, param)); } static void __storm_memset_dma_mapping(struct bxe_softc *sc, uint32_t addr, bus_addr_t mapping) { REG_WR(sc, addr, U64_LO(mapping)); REG_WR(sc, (addr + 4), U64_HI(mapping)); } static void storm_memset_spq_addr(struct bxe_softc *sc, bus_addr_t mapping, uint16_t abs_fid) { uint32_t addr = (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); __storm_memset_dma_mapping(sc, addr, mapping); } static void storm_memset_vf_to_pf(struct bxe_softc *sc, uint16_t abs_fid, uint16_t pf_id) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); } static void storm_memset_func_en(struct bxe_softc *sc, uint16_t abs_fid, uint8_t enable) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); } static void storm_memset_eq_data(struct bxe_softc *sc, struct event_ring_data *eq_data, uint16_t pfid) { uint32_t addr; size_t size; addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); size = sizeof(struct event_ring_data); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); } static void storm_memset_eq_prod(struct bxe_softc *sc, uint16_t eq_prod, uint16_t pfid) { uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); REG_WR16(sc, addr, eq_prod); }
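/*
 * A standalone model of the U64_LO/U64_HI split used by
 * __storm_memset_dma_mapping() above (illustrative only): a 64-bit DMA
 * address is programmed as two 32-bit register writes, low half first.
 * The macro names below are local to the model; the block is not compiled.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define LO32(x) ((uint32_t)((x) & 0xffffffffULL))
#define HI32(x) ((uint32_t)(((x) >> 32) & 0xffffffffULL))

int
main(void)
{
    uint64_t mapping = 0x00000001fee1c0deULL;

    assert(LO32(mapping) == 0xfee1c0de);
    assert(HI32(mapping) == 0x00000001);
    /* reassembling the two halves must give back the original address */
    assert(((uint64_t)HI32(mapping) << 32 | LO32(mapping)) == mapping);
    return (0);
}
#endif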
/* * Post a slowpath command. * * A slowpath command is used to propagate a configuration change through * the controller in a controlled manner, allowing each STORM processor and * other H/W blocks to phase in the change. The commands sent on the * slowpath are referred to as ramrods. Depending on the ramrod used the * completion of the ramrod will occur in different ways. Here's a * breakdown of ramrods and how they complete: * * RAMROD_CMD_ID_ETH_PORT_SETUP * Used to set up the leading connection on a port. Completes on the * Receive Completion Queue (RCQ) of that port (typically fp[0]). * * RAMROD_CMD_ID_ETH_CLIENT_SETUP * Used to set up an additional connection on a port. Completes on the * RCQ of the multi-queue/RSS connection being initialized. * * RAMROD_CMD_ID_ETH_STAT_QUERY * Used to force the storm processors to update the statistics database * in host memory. This ramrod is sent on the leading connection CID and * completes as an index increment of the CSTORM on the default status * block. * * RAMROD_CMD_ID_ETH_UPDATE * Used to update the state of the leading connection, usually to update * the RSS indirection table. Completes on the RCQ of the leading * connection. (Not currently used under FreeBSD until OS support becomes * available.) * * RAMROD_CMD_ID_ETH_HALT * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the multi-queue/RSS connection being torn down. Don't * use this on the leading connection. * * RAMROD_CMD_ID_ETH_SET_MAC * Sets the Unicast/Broadcast/Multicast used by the port. Completes on * the RCQ of the leading connection. * * RAMROD_CMD_ID_ETH_CFC_DEL * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the leading connection (since the current connection * has been completely removed from controller memory). * * RAMROD_CMD_ID_ETH_PORT_DEL * Used to tear down the leading connection prior to driver unload, * typically fp[0]. Completes as an index increment of the CSTORM on the * default status block. * * RAMROD_CMD_ID_ETH_FORWARD_SETUP * Used for connection offload. Completes on the RCQ of the multi-queue * RSS connection that is being offloaded. (Not currently used under * FreeBSD.) * * There can only be one command pending per function. * * Returns: * 0 = Success, !0 = Failure. */ /* must be called under the spq lock */ static inline struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) { struct eth_spe *next_spe = sc->spq_prod_bd; if (sc->spq_prod_bd == sc->spq_last_bd) { /* wrap back to the first eth_spq */ sc->spq_prod_bd = sc->spq; sc->spq_prod_idx = 0; } else { sc->spq_prod_bd++; sc->spq_prod_idx++; } return (next_spe); } /* must be called under the spq lock */ static inline void bxe_sp_prod_update(struct bxe_softc *sc) { int func = SC_FUNC(sc); /* * Make sure that BD data is updated before writing the producer. * BD data is written to the memory, the producer is read from the * memory, thus we need a full memory barrier to ensure the ordering. */ mb(); REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), sc->spq_prod_idx); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); } /** * bxe_is_contextless_ramrod - check if the current command ends on EQ * * @cmd: command to check * @cmd_type: command type */ static inline int bxe_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { return (TRUE); } else { return (FALSE); } }
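/*
 * A standalone model of the producer wrap in bxe_sp_get_next() above
 * (illustrative only): the producer hands out the current slot and then
 * either advances or snaps back to element 0 after the last slot. The ring
 * size below is assumed for the model; the block is not compiled.
 */
#if 0
#include <assert.h>

#define SPQ_ENTRIES 8 /* assumed ring size, illustration only */

int
main(void)
{
    int prod_idx = SPQ_ENTRIES - 1; /* producer sits on the last slot */
    int next;

    next = prod_idx;                /* slot handed to the caller */
    if (prod_idx == SPQ_ENTRIES - 1)
        prod_idx = 0;               /* wrap back to the first element */
    else
        prod_idx++;

    assert(next == SPQ_ENTRIES - 1);
    assert(prod_idx == 0);
    return (0);
}
#endif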
/** * bxe_sp_post - place a single command on an SP ring * * @sc: driver handle * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) * @cid: SW CID the command is related to * @data_hi: command private data address (high 32 bits) * @data_lo: command private data address (low 32 bits) * @cmd_type: command type (e.g. NONE, ETH) * * SP data is handled as if it's always an address pair, thus data fields are * not swapped to little endian in upper functions. Instead this function swaps * data as if it's two uint32 fields. */ int bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi, uint32_t data_lo, int cmd_type) { struct eth_spe *spe; uint16_t type; int common; common = bxe_is_contextless_ramrod(command, cmd_type); BXE_SP_LOCK(sc); if (common) { if (!atomic_load_acq_long(&sc->eq_spq_left)) { BLOGE(sc, "EQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } else { if (!atomic_load_acq_long(&sc->cq_spq_left)) { BLOGE(sc, "SPQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } spe = bxe_sp_get_next(sc); /* CID needs the port number to be encoded in it */ spe->hdr.conn_and_cmd_data = htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid)); type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE; /* TBD: Check if it works for VFs */ type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) & SPE_HDR_T_FUNCTION_ID); spe->hdr.type = htole16(type); spe->data.update_data_addr.hi = htole32(data_hi); spe->data.update_data_addr.lo = htole32(data_lo); /* * It's ok if the actual decrement is issued towards the memory * somewhere between the lock and unlock. Thus no more explicit * memory barrier is needed. */ if (common) { atomic_subtract_acq_long(&sc->eq_spq_left, 1); } else { atomic_subtract_acq_long(&sc->cq_spq_left, 1); } BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); BLOGD(sc, DBG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", sc->spq_prod_idx, (uint32_t)U64_HI(sc->spq_dma.paddr), (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), command, common, HW_CID(sc, cid), data_hi, data_lo, type, atomic_load_acq_long(&sc->cq_spq_left), atomic_load_acq_long(&sc->eq_spq_left)); bxe_sp_prod_update(sc); BXE_SP_UNLOCK(sc); return (0); } /** * bxe_debug_print_ind_table - prints the indirection table configuration. * * @sc: driver handle * @p: pointer to rss configuration */ /* * FreeBSD Device probe function. * * Compares the device found to the driver's list of supported devices and * reports back to the bsd loader whether this is the right driver for the device. * This is the driver entry function called from the "kldload" command. * * Returns: * BUS_PROBE_DEFAULT on success, positive value on failure. */ static int bxe_probe(device_t dev) { struct bxe_softc *sc; struct bxe_device_type *t; char *descbuf; uint16_t did, sdid, svid, vid; /* Find our device structure */ sc = device_get_softc(dev); sc->dev = dev; t = bxe_devs; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); BLOGD(sc, DBG_LOAD, "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); /* Look through the list of known devices for a match. */ while (t->bxe_name != NULL) { if ((vid == t->bxe_vid) && (did == t->bxe_did) && ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); if (descbuf == NULL) return (ENOMEM); /* Print out the device identity.
*/ snprintf(descbuf, BXE_DEVDESC_MAX, "%s (%c%d) BXE v:%s\n", t->bxe_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf), BXE_DRIVER_VERSION); device_set_desc_copy(dev, descbuf); free(descbuf, M_TEMP); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bxe_init_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), "bxe%d_core_lock", sc->unit); sx_init(&sc->core_sx, sc->core_sx_name); #else snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), "bxe%d_core_lock", sc->unit); mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); #endif snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), "bxe%d_sp_lock", sc->unit); mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), "bxe%d_dmae_lock", sc->unit); mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), "bxe%d_phy_lock", sc->unit); mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), "bxe%d_fwmb_lock", sc->unit); mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), "bxe%d_print_lock", sc->unit); mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), "bxe%d_stats_lock", sc->unit); mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), "bxe%d_mcast_lock", sc->unit); mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); } static void bxe_release_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX sx_destroy(&sc->core_sx); #else if (mtx_initialized(&sc->core_mtx)) { mtx_destroy(&sc->core_mtx); } #endif if (mtx_initialized(&sc->sp_mtx)) { mtx_destroy(&sc->sp_mtx); } if (mtx_initialized(&sc->dmae_mtx)) { mtx_destroy(&sc->dmae_mtx); } if (mtx_initialized(&sc->port.phy_mtx)) { mtx_destroy(&sc->port.phy_mtx); } if (mtx_initialized(&sc->fwmb_mtx)) { mtx_destroy(&sc->fwmb_mtx); } if (mtx_initialized(&sc->print_mtx)) { mtx_destroy(&sc->print_mtx); } if (mtx_initialized(&sc->stats_mtx)) { mtx_destroy(&sc->stats_mtx); } if (mtx_initialized(&sc->mcast_mtx)) { mtx_destroy(&sc->mcast_mtx); } } static void bxe_tx_disable(struct bxe_softc* sc) { if_t ifp = sc->ifp; /* tell the stack the driver is stopped and TX queue is full */ if (ifp != NULL) { if_setdrvflags(ifp, 0); } } static void bxe_drv_pulse(struct bxe_softc *sc) { SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, sc->fw_drv_pulse_wr_seq); } static inline uint16_t bxe_tx_avail(struct bxe_softc *sc, struct bxe_fastpath *fp) { int16_t used; uint16_t prod; uint16_t cons; prod = fp->tx_bd_prod; cons = fp->tx_bd_cons; used = SUB_S16(prod, cons); return (int16_t)(sc->tx_ring_size) - used; } static inline int bxe_tx_queue_has_work(struct bxe_fastpath *fp) { uint16_t hw_cons; mb(); /* status block fields can change */ hw_cons = le16toh(*fp->tx_cons_sb); return (hw_cons != fp->tx_pkt_cons); } static inline uint8_t bxe_has_tx_work(struct bxe_fastpath *fp) { /* expand this for multi-cos if ever supported */ return (bxe_tx_queue_has_work(fp)) ? 
TRUE : FALSE; } static inline int bxe_has_rx_work(struct bxe_fastpath *fp) { uint16_t rx_cq_cons_sb; mb(); /* status block fields can change */ rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) rx_cq_cons_sb++; return (fp->rx_cq_cons != rx_cq_cons_sb); } static void bxe_sp_event(struct bxe_softc *sc, struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe) { int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); drv_cmd = ECORE_Q_CMD_UPDATE; break; case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP; break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; break; case (RAMROD_CMD_ID_ETH_HALT): BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); drv_cmd = ECORE_Q_CMD_HALT; break; case (RAMROD_CMD_ID_ETH_TERMINATE): BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = ECORE_Q_CMD_TERMINATE; break; case (RAMROD_CMD_ID_ETH_EMPTY): BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); drv_cmd = ECORE_Q_CMD_EMPTY; break; default: BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", command, fp->index); return; } if ((drv_cmd != ECORE_Q_CMD_MAX) && q_obj->complete_cmd(sc, q_obj, drv_cmd)) { /* * q_obj->complete_cmd() failure means that this was * an unexpected completion. * * In this case we don't want to increase the sc->spq_left * because apparently we haven't sent this command in the first * place. */ // bxe_panic(sc, ("Unexpected SP completion\n")); return; } atomic_add_acq_long(&sc->cq_spq_left, 1); BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", atomic_load_acq_long(&sc->cq_spq_left)); } /* * The current mbuf is part of an aggregation. Move the mbuf into the TPA * aggregation queue, put an empty mbuf back onto the receive chain, and mark * the current aggregation queue as in-progress. */ static void bxe_tpa_start(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue, uint16_t cons, uint16_t prod, struct eth_fast_path_rx_cqe *cqe) { struct bxe_sw_rx_bd tmp_bd; struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; int max_agg_queues; struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; uint16_t index; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " "cons=%d prod=%d\n", fp->index, queue, cons, prod); max_agg_queues = MAX_AGG_QS(sc); KASSERT((queue < max_agg_queues), ("fp[%02d] invalid aggr queue (%d >= %d)!", fp->index, queue, max_agg_queues)); KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", fp->index, queue)); /* copy the existing mbuf and mapping from the TPA pool */ tmp_bd = tpa_info->bd; if (tmp_bd.m == NULL) { uint32_t *tmp; tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n", fp->index, queue, cons, prod); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); /* XXX Error handling?
*/ return; } /* change the TPA queue to the start state */ tpa_info->state = BXE_TPA_STATE_START; tpa_info->placement_offset = cqe->placement_offset; tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); tpa_info->vlan_tag = le16toh(cqe->vlan_tag); tpa_info->len_on_bd = le16toh(cqe->len_on_bd); fp->rx_tpa_queue_used |= (1 << queue); /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ index = (sc->max_rx_bufs != RX_BD_USABLE) ? prod : cons; /* move the received mbuf and mapping to TPA pool */ tpa_info->bd = fp->rx_mbuf_chain[cons]; /* release any existing RX BD mbuf mappings */ if (cons != index) { rx_buf = &fp->rx_mbuf_chain[cons]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We get here when the maximum number of rx buffers is less than * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL * it out here without concern of a memory leak. */ fp->rx_mbuf_chain[cons].m = NULL; } /* update the Rx SW BD with the mbuf info from the TPA pool */ fp->rx_mbuf_chain[index] = tmp_bd; /* update the Rx BD with the empty mbuf phys address from the TPA pool */ rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); } /* * When a TPA aggregation is completed, loop through the individual mbufs * of the aggregation, combining them into a single mbuf which will be sent * up the stack. Refill all freed SGEs with mbufs as we go along. */ static int bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct mbuf *m, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { struct mbuf *m_frag; uint32_t frag_len, frag_size, i; uint16_t sge_idx; int rc = 0; int j; frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", fp->index, queue, tpa_info->len_on_bd, frag_size, pages); /* make sure the aggregated frame is not too big to handle */ if (pages > 8 * PAGES_PER_SGE) { uint32_t *tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " "pkt_len=%d len_on_bd=%d frag_size=%d\n", fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), tpa_info->len_on_bd, frag_size); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); bxe_panic(sc, ("sge page count error\n")); return (EINVAL); } /* * Scan through the scatter gather list pulling individual mbufs into a * single mbuf for the host stack. */ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); /* * Firmware gives the indices of the SGE as if the ring is an array * (meaning that the "next" element will consume 2 indices). */ frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " "sge_idx=%d frag_size=%d frag_len=%d\n", fp->index, queue, i, j, sge_idx, frag_size, frag_len); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! 
*/ return (rc); } /* update the fragment length */ m_frag->m_len = frag_len; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); fp->eth_q_stats.mbuf_alloc_sge--; /* update the TPA mbuf size and remaining fragment size */ m->m_pkthdr.len += frag_len; frag_size -= frag_len; } BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", fp->index, queue, frag_size); return (rc); } static inline void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) { int i, j; for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; for (j = 0; j < 2; j++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); idx--; } } } static inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) { /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); /* * Clear the last two indices in each page. These are the indices that * correspond to the "next" element, hence will never be indicated and * should be removed from the calculations. */ bxe_clear_sge_mask_next_elems(fp); } static inline void bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx) { uint16_t last_max = fp->last_max_sge; if (SUB_S16(idx, last_max) > 0) { fp->last_max_sge = idx; } } static inline void bxe_update_sge_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t sge_len, union eth_sgl_or_raw_data *cqe) { uint16_t last_max, last_elem, first_elem; uint16_t delta = 0; uint16_t i; if (!sge_len) { return; } /* first mark all used pages */ for (i = 0; i < sge_len; i++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, RX_SGE(le16toh(cqe->sgl[i]))); } BLOGD(sc, DBG_LRO, "fp[%02d] fp_cqe->sgl[%d] = %d\n", fp->index, sge_len - 1, le16toh(cqe->sgl[sge_len - 1])); /* assume that the last SGE index is the biggest */ bxe_update_last_max_sge(fp, le16toh(cqe->sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; /* if ring is not full */ if (last_elem + 1 != first_elem) { last_elem++; } /* now update the prod */ for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { if (__predict_true(fp->sge_mask[i])) { break; } fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; delta += BIT_VEC64_ELEM_SZ; } if (delta > 0) { fp->rx_sge_prod += delta; /* clear page-end entries */ bxe_clear_sge_mask_next_elems(fp); } BLOGD(sc, DBG_LRO, "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", fp->index, fp->last_max_sge, fp->rx_sge_prod); }
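/*
 * A standalone model of the 64-bit SGE mask bookkeeping performed by
 * bxe_update_sge_prod() above (illustrative only): a set bit means the SGE
 * is available, consumed SGEs clear their bits, and once an entire 64-bit
 * element is cleared it is re-armed to all ones and the producer jumps past
 * it. The block is not compiled.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    uint64_t elem = ~0ULL;  /* all 64 SGEs in this element available */
    int prod = 0;
    int idx;

    for (idx = 0; idx < 64; idx++)
        elem &= ~(1ULL << idx);  /* consume every SGE in the element */

    if (elem == 0) {             /* fully-consumed element ... */
        elem = ~0ULL;            /* ... is re-armed to all ones ... */
        prod += 64;              /* ... and the producer advances past it */
    }
    assert(prod == 64);
    return (0);
}
#endif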
/* * The aggregation on the current TPA queue has completed. Pull the individual * mbuf fragments together into a single mbuf, perform all necessary checksum * calculations, and send the resulting mbuf to the stack. */ static void bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { if_t ifp = sc->ifp; struct mbuf *m; int rc = 0; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", fp->index, queue, tpa_info->placement_offset, le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); m = tpa_info->bd.m; /* allocate a replacement before modifying existing mbuf */ rc = bxe_alloc_rx_tpa_mbuf(fp, queue); if (rc) { /* drop the frame and log an error */ fp->eth_q_stats.rx_soft_errors++; goto bxe_tpa_stop_exit; } /* we have a replacement, fixup the current mbuf */ m_adj(m, tpa_info->placement_offset); m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; /* mark the checksums valid (taken care of by the firmware) */ fp->eth_q_stats.rx_ofld_frames_csum_ip++; fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); /* aggregate all of the SGEs into a single mbuf */ rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); if (rc) { /* drop the packet and log an error */ fp->eth_q_stats.rx_soft_errors++; m_freem(m); } else { if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; m->m_flags |= M_VLANTAG; } /* assign the packet to this interface */ if_setrcvif(m, ifp); #if __FreeBSD_version >= 800000 /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); fp->eth_q_stats.rx_tpa_pkts++; /* pass the frame to the stack */ if_input(ifp, m); } /* we passed an mbuf up the stack or dropped the frame */ fp->eth_q_stats.mbuf_alloc_tpa--; bxe_tpa_stop_exit: fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; fp->rx_tpa_queue_used &= ~(1 << queue); } static uint8_t bxe_service_rxsgl( struct bxe_fastpath *fp, uint16_t len, uint16_t lenonbd, struct mbuf *m, struct eth_fast_path_rx_cqe *cqe_fp) { struct mbuf *m_frag; uint16_t frags, frag_len; uint16_t sge_idx = 0; uint16_t j; uint8_t i, rc = 0; uint32_t frag_size; /* adjust the mbuf */ m->m_len = lenonbd; frag_size = len - lenonbd; frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); m_frag->m_len = frag_len; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring!
*/ return (rc); } fp->eth_q_stats.mbuf_alloc_sge--; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); frag_size -= frag_len; } bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); return rc; } static uint8_t bxe_rxeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; int rx_pkts = 0; int rc = 0; BXE_FP_RX_LOCK(fp); /* CQ "next element" is of the size of the regular element */ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { hw_cq_cons++; } bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_cq_cons = fp->rx_cq_cons; sw_cq_prod = fp->rx_cq_prod; /* * Memory barrier necessary as speculative reads of the rx * buffer can be ahead of the index in the status block */ rmb(); BLOGD(sc, DBG_RX, "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", fp->index, hw_cq_cons, sw_cq_cons); while (sw_cq_cons != hw_cq_cons) { struct bxe_sw_rx_bd *rx_buf = NULL; union eth_rx_cqe *cqe; struct eth_fast_path_rx_cqe *cqe_fp; uint8_t cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; uint16_t len, lenonbd, pad; struct mbuf *m = NULL; comp_ring_cons = RCQ(sw_cq_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); cqe = &fp->rcq_chain[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; BLOGD(sc, DBG_RX, "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " "BD prod=%d cons=%d CQE type=0x%x err=0x%x " "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", fp->index, hw_cq_cons, sw_cq_cons, bd_prod, bd_cons, CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32toh(cqe_fp->rss_hash_result), le16toh(cqe_fp->vlan_tag), le16toh(cqe_fp->pkt_len_or_gro_seg_len), le16toh(cqe_fp->len_on_bd)); /* is this a slowpath msg? */ if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { bxe_sp_event(sc, fp, cqe); goto next_cqe; } rx_buf = &fp->rx_mbuf_chain[bd_cons]; if (!CQE_TYPE_FAST(cqe_fp_type)) { struct bxe_sw_tpa_info *tpa_info; uint16_t frag_size, pages; uint8_t queue; if (CQE_TYPE_START(cqe_fp_type)) { bxe_tpa_start(sc, fp, cqe_fp->queue_index, bd_cons, bd_prod, cqe_fp); m = NULL; /* packet not ready yet */ goto next_rx; } KASSERT(CQE_TYPE_STOP(cqe_fp_type), ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->rx_tpa_info[queue]; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", fp->index, queue); frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - tpa_info->len_on_bd); pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; bxe_tpa_stop(sc, fp, tpa_info, queue, pages, &cqe->end_agg_cqe, comp_ring_cons); bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); goto next_cqe; } /* non TPA */ /* is this an error packet? */ if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); fp->eth_q_stats.rx_soft_errors++; goto next_rx; } len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); lenonbd = le16toh(cqe_fp->len_on_bd); pad = cqe_fp->placement_offset; m = rx_buf->m; if (__predict_false(m == NULL)) { BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", bd_cons, fp->index); goto next_rx; } /* XXX double copy if packet length under a threshold */ /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. 
Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, (sc->max_rx_bufs != RX_BD_USABLE) ? bd_prod : bd_cons); if (rc != 0) { /* we simply reuse the received mbuf and don't post it to the stack */ m = NULL; BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", fp->index, rc); fp->eth_q_stats.rx_soft_errors++; if (sc->max_rx_bufs != RX_BD_USABLE) { /* copy this consumer index to the producer index */ memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, sizeof(struct bxe_sw_rx_bd)); memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); } goto next_rx; } /* current mbuf was detached from the bd */ fp->eth_q_stats.mbuf_alloc_rx--; /* we allocated a replacement mbuf, fixup the current one */ m_adj(m, pad); m->m_pkthdr.len = m->m_len = len; if ((len > 60) && (len > lenonbd)) { fp->eth_q_stats.rx_bxe_service_rxsgl++; rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); if (rc) break; fp->eth_q_stats.rx_jumbo_sge_pkts++; } else if (lenonbd < len) { fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; } /* assign the packet to this interface */ if_setrcvif(m, ifp); /* assume no hardware checksum has completed */ m->m_pkthdr.csum_flags = 0; /* validate checksum if offload enabled */ if (if_getcapenable(ifp) & IFCAP_RXCSUM) { /* check for a valid IP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_ip++; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } } /* check for a valid TCP/UDP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xFFFF; m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } } /* if there is a VLAN tag then flag that info */ if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; m->m_flags |= M_VLANTAG; } #if __FreeBSD_version >= 800000 /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); #endif next_rx: bd_cons = RX_BD_NEXT(bd_cons); bd_prod = RX_BD_NEXT(bd_prod); bd_prod_fw = RX_BD_NEXT(bd_prod_fw); /* pass the frame to the stack */ if (__predict_true(m != NULL)) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rx_pkts++; if_input(ifp, m); } next_cqe: sw_cq_prod = RCQ_NEXT(sw_cq_prod); sw_cq_cons = RCQ_NEXT(sw_cq_cons); /* limit spinning on the queue */ if (rc != 0) break; if (rx_pkts == sc->rx_budget) { fp->eth_q_stats.rx_budget_reached++; break; } } /* while work to do */ fp->rx_bd_cons = bd_cons; fp->rx_bd_prod = bd_prod_fw; fp->rx_cq_cons = sw_cq_cons; fp->rx_cq_prod = sw_cq_prod; /* Update producers */ bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); fp->eth_q_stats.rx_pkts += rx_pkts; fp->eth_q_stats.rx_calls++; BXE_FP_RX_UNLOCK(fp); return (sw_cq_cons != hw_cq_cons); }
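/*
 * A standalone model of the completion-queue index walk in bxe_rxeof()
 * above (illustrative only): the last element of each RCQ page is a "next
 * page" pointer, so the consumer index hops over it rather than treating
 * it as a real completion. The page size is assumed for the model; the
 * block is not compiled.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define RCQ_PER_PAGE 128                  /* assumed entries per page */
#define RCQ_USABLE   (RCQ_PER_PAGE - 1)   /* last slot is the link element */

static uint16_t
rcq_next(uint16_t idx)
{
    idx++;
    if ((idx & (RCQ_PER_PAGE - 1)) == RCQ_USABLE)
        idx++;                            /* hop over the link element */
    return (idx);
}

int
main(void)
{
    assert(rcq_next(0) == 1);
    assert(rcq_next(RCQ_USABLE - 1) == RCQ_PER_PAGE); /* 126 -> 128 */
    return (0);
}
#endif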
static uint16_t bxe_free_tx_pkt(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t idx) { struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_idx = TX_BD(tx_buf->first_bd); uint16_t new_cons; int nbd; /* unmap the mbuf from non-paged memory */ bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); tx_start_bd = &fp->tx_chain[bd_idx].start_bd; nbd = le16toh(tx_start_bd->nbd) - 1; new_cons = (tx_buf->first_bd + nbd); /* free the mbuf */ if (__predict_true(tx_buf->m != NULL)) { m_freem(tx_buf->m); fp->eth_q_stats.mbuf_alloc_tx--; } else { fp->eth_q_stats.tx_chain_lost_mbuf++; } tx_buf->m = NULL; tx_buf->first_bd = 0; return (new_cons); } /* transmit timeout watchdog */ static int bxe_watchdog(struct bxe_softc *sc, struct bxe_fastpath *fp) { BXE_FP_TX_LOCK(fp); if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { BXE_FP_TX_UNLOCK(fp); return (0); } BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); - if(sc->trigger_grcdump) { - /* taking grcdump */ - bxe_grc_dump(sc); - } BXE_FP_TX_UNLOCK(fp); atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); return (-1); } /* processes transmit completions */ static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); bd_cons = fp->tx_bd_cons; hw_cons = le16toh(*fp->tx_cons_sb); sw_cons = fp->tx_pkt_cons; while (sw_cons != hw_cons) { pkt_cons = TX_BD(sw_cons); BLOGD(sc, DBG_TX, "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", fp->index, hw_cons, sw_cons, pkt_cons); bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); sw_cons++; } fp->tx_pkt_cons = sw_cons; fp->tx_bd_cons = bd_cons; BLOGD(sc, DBG_TX, "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); mb(); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); } else { if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } if (fp->tx_pkt_prod != fp->tx_pkt_cons) { /* reset the watchdog timer if there are pending transmits */ fp->watchdog_timer = BXE_TX_TIMEOUT; return (TRUE); } else { /* clear watchdog when there are no pending transmits */ fp->watchdog_timer = 0; return (FALSE); } } static void bxe_drain_tx_queues(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, count; /* wait until all TX fastpath tasks have completed */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; count = 1000; while (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); if (count == 0) { BLOGE(sc, "Timeout waiting for fp[%d] " "transmits to complete!\n", i); bxe_panic(sc, ("tx drain failure\n")); return; } count--; DELAY(1000); rmb(); } } return; } static int bxe_del_all_macs(struct bxe_softc *sc, struct ecore_vlan_mac_obj *mac_obj, int mac_type, uint8_t wait_for_comp) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; int rc; /* wait for completion of the request if asked to */ if (wait_for_comp) { bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); } /* Set the mac type of the addresses we want to clear */ bxe_set_bit(mac_type, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n", rc, mac_type, wait_for_comp); } return (rc); }
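/*
 * The TX bookkeeping above relies on free-running 16-bit producer/consumer
 * indices: the in-flight count is taken as a 16-bit signed difference
 * (SUB_S16) so the math stays correct across wrap-around. A standalone
 * model of that property (illustrative only; it assumes the usual two's
 * complement narrowing, and the block is not compiled):
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int16_t
sub_s16(uint16_t a, uint16_t b)
{
    return ((int16_t)((int16_t)a - (int16_t)b));
}

int
main(void)
{
    uint16_t prod = 0x0005, cons = 0xfffb; /* producer wrapped past zero */

    /* 10 descriptors are in flight even though prod < cons numerically */
    assert(sub_s16(prod, cons) == 10);
    return (0);
}
#endif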
 */
break;

case BXE_RX_MODE_NORMAL:
    bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

    /* internal switching mode */
    bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

    break;

case BXE_RX_MODE_ALLMULTI:
    bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

    /* internal switching mode */
    bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

    break;

case BXE_RX_MODE_PROMISC:
    /*
     * According to the definition of SI mode, an interface in promisc
     * mode should receive matched and unmatched (in resolution of
     * port) unicast packets.
     */
    bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);

    /* internal switching mode */
    bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);

    if (IS_MF_SI(sc)) {
        bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
    } else {
        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
    }

    break;

default:
    BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
    return (-1);
}

/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (rx_mode != BXE_RX_MODE_NONE) {
    bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
    bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
}

return (0);
}

static int
bxe_set_q_rx_mode(struct bxe_softc *sc,
                  uint8_t          cl_id,
                  unsigned long    rx_mode_flags,
                  unsigned long    rx_accept_flags,
                  unsigned long    tx_accept_flags,
                  unsigned long    ramrod_flags)
{
    struct ecore_rx_mode_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* Prepare ramrod parameters */
    ramrod_param.cid = 0;
    ramrod_param.cl_id = cl_id;
    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
    ramrod_param.func_id = SC_FUNC(sc);

    ramrod_param.pstate = &sc->sp_state;
    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;

    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);

    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    ramrod_param.ramrod_flags = ramrod_flags;
    ramrod_param.rx_mode_flags = rx_mode_flags;

    ramrod_param.rx_accept_flags = rx_accept_flags;
    ramrod_param.tx_accept_flags = tx_accept_flags;

    rc = ecore_config_rx_mode(sc, &ramrod_param);
    if (rc < 0) {
        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
              "rx_accept_flags 0x%x tx_accept_flags 0x%x "
              "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
              (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
              (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
        return (rc);
    }

    return (0);
}

static int
bxe_set_storm_rx_mode(struct bxe_softc *sc)
{
    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
    int rc;

    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
                               &tx_accept_flags);
    if (rc) {
        return (rc);
    }

    bxe_set_bit(RAMROD_RX, &ramrod_flags);
    bxe_set_bit(RAMROD_TX, &ramrod_flags);

    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
                              rx_accept_flags, tx_accept_flags,
                              ramrod_flags));
}
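/*
 * Illustrative sketch (not part of the driver): the per-mode accept
 * flags built by bxe_fill_accept_flags() above can be pictured as a
 * small table. The names rx_mode_entry and RXF_* below are
 * hypothetical and exist only to show the shape of the mapping; the
 * driver itself uses the ecore bit helpers on unsigned long flag
 * words as shown above.
 */
#if 0 /* illustration only, not compiled */
enum { RXF_UCAST = 1, RXF_MCAST = 2, RXF_ALLMCAST = 4, RXF_BCAST = 8,
       RXF_UNMATCHED = 16 };
struct rx_mode_entry {
    int mode; /* BXE_RX_MODE_* */
    int rx;   /* accepted from the wire */
    int tx;   /* internal switching (TX side) */
};
static const struct rx_mode_entry rx_mode_table[] = {
    { BXE_RX_MODE_NONE,     0, 0 },
    { BXE_RX_MODE_NORMAL,   RXF_UCAST | RXF_MCAST | RXF_BCAST,
                            RXF_UCAST | RXF_MCAST | RXF_BCAST },
    { BXE_RX_MODE_ALLMULTI, RXF_UCAST | RXF_ALLMCAST | RXF_BCAST,
                            RXF_UCAST | RXF_ALLMCAST | RXF_BCAST },
    { BXE_RX_MODE_PROMISC,  RXF_UNMATCHED | RXF_UCAST | RXF_ALLMCAST | RXF_BCAST,
                            RXF_ALLMCAST | RXF_BCAST /* + unicast per MF-SI */ },
};
/* ACCEPT_ANY_VLAN is then OR'd into both sides for every mode but NONE. */
#endif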
/* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_load_no_mcp(struct bxe_softc *sc) { int path = SC_PATH(sc); int port = SC_PORT(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]++; load_count[path][1 + port]++; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 1) { return (FW_MSG_CODE_DRV_LOAD_COMMON); } else if (load_count[path][1 + port] == 1) { return (FW_MSG_CODE_DRV_LOAD_PORT); } else { return (FW_MSG_CODE_DRV_LOAD_FUNCTION); } } /* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_unload_no_mcp(struct bxe_softc *sc) { int port = SC_PORT(sc); int path = SC_PATH(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]--; load_count[path][1 + port]--; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_COMMON); } else if (load_count[path][1 + port] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_PORT); } else { return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); } } /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ static uint32_t bxe_send_unload_req(struct bxe_softc *sc, int unload_mode) { uint32_t reset_code = 0; /* Select the UNLOAD request mode */ if (unload_mode == UNLOAD_NORMAL) { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } else { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } /* Send the request to the MCP */ if (!BXE_NOMCP(sc)) { reset_code = bxe_fw_command(sc, reset_code, 0); } else { reset_code = bxe_nic_unload_no_mcp(sc); } return (reset_code); } /* send UNLOAD_DONE command to the MCP */ static void bxe_send_unload_done(struct bxe_softc *sc, uint8_t keep_link) { uint32_t reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; /* Report UNLOAD_DONE to MCP */ if (!BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } } static int bxe_func_wait_started(struct bxe_softc *sc) { int tout = 50; if (!sc->port.pmf) { return (0); } /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB * 2. Sync SP queue - this guarantees us that attention handling started * 3. Wait, that TX disable/enable transaction completes * * 1+2 guarantee that if DCBX attention was scheduled it already changed * pending bit of transaction from STARTED-->TX_STOPPED, if we already * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ /* XXX make sure default SB ISR is done */ /* need a way to synchronize an irq (intr_mtx?) */ /* XXX flush any work queues */ while (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED && tout--) { DELAY(20000); } if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit. */ struct ecore_func_state_params func_params = { NULL }; BLOGE(sc, "Unexpected function state! 
" "Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &sc->func_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); /* STARTED-->TX_STOPPED */ func_params.cmd = ECORE_F_CMD_TX_STOP; ecore_func_state_change(sc, &func_params); /* TX_STOPPED-->STARTED */ func_params.cmd = ECORE_F_CMD_TX_START; return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_stop_queue(struct bxe_softc *sc, int index) { struct bxe_fastpath *fp = &sc->fp[index]; struct ecore_queue_state_params q_params = { NULL }; int rc; BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); q_params.q_obj = &sc->sp_objs[fp->index].q_obj; /* We want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* Stop the primary connection: */ /* ...halt the connection */ q_params.cmd = ECORE_Q_CMD_HALT; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...terminate the connection */ q_params.cmd = ECORE_Q_CMD_TERMINATE; memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...delete cfc entry */ q_params.cmd = ECORE_Q_CMD_CFC_DEL; memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; return (ecore_queue_state_change(sc, &q_params)); } /* wait for the outstanding SP commands */ static inline uint8_t bxe_wait_sp_comp(struct bxe_softc *sc, unsigned long mask) { unsigned long tmp; int tout = 5000; /* wait for 5 secs tops */ while (tout--) { mb(); if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { return (TRUE); } DELAY(1000); } mb(); tmp = atomic_load_acq_long(&sc->sp_state); if (tmp & mask) { BLOGE(sc, "Filtering completion timed out: " "sp_state 0x%lx, mask 0x%lx\n", tmp, mask); return (FALSE); } return (FALSE); } static int bxe_func_stop(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_STOP; /* * Try to stop the function the 'good way'. If it fails (in case * of a parity error during bxe_chip_cleanup()) and we are * not in a debug mode, perform a state transaction in order to * enable further HW_RESET transaction. */ rc = ecore_func_state_change(sc, &func_params); if (rc) { BLOGE(sc, "FUNC_STOP ramrod failed. " "Running a dry transaction (%d)\n", rc); bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_reset_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; /* Prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_RESET; func_params.params.hw_init.load_phase = load_code; return (ecore_func_state_change(sc, &func_params)); } static void bxe_int_disable_sync(struct bxe_softc *sc, int disable_hw) { if (disable_hw) { /* prevent the HW from sending interrupts */ bxe_int_disable(sc); } /* XXX need a way to synchronize ALL irqs (intr_mtx?) 
 */

    /* make sure all ISRs are done */

    /* XXX make sure sp_task is not running */
    /* cancel and flush work queues */
}

static void
bxe_chip_cleanup(struct bxe_softc *sc,
                 uint32_t         unload_mode,
                 uint8_t          keep_link)
{
    int port = SC_PORT(sc);
    struct ecore_mcast_ramrod_params rparam = { NULL };
    uint32_t reset_code;
    int i, rc = 0;

    bxe_drain_tx_queues(sc);

    /* give HW time to discard old tx messages */
    DELAY(1000);

    /* Clean all ETH MACs */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
    }

    /* Clean up UC list */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC,
                          TRUE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
    }

    /* Disable LLH */
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
    }

    /* Set "drop all" to stop Rx */

    /*
     * We need to take the BXE_MCAST_LOCK() here in order to prevent
     * a race between the completion code and this code.
     */
    BXE_MCAST_LOCK(sc);

    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
    } else {
        bxe_set_storm_rx_mode(sc);
    }

    /* Clean up multicast configuration */
    rparam.mcast_obj = &sc->mcast_obj;
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    BXE_MCAST_UNLOCK(sc);

    // XXX bxe_iov_chip_cleanup(sc);

    /*
     * Send the UNLOAD_REQUEST to the MCP. This will return if
     * this function should perform FUNCTION, PORT, or COMMON HW
     * reset.
     */
    reset_code = bxe_send_unload_req(sc, unload_mode);

    /*
     * (assumption: No Attention from MCP at this stage)
     * PMF probably in the middle of TX disable/enable transaction
     */
    rc = bxe_func_wait_started(sc);
    if (rc) {
        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
    }

    /*
     * Close multi and leading connections
     * Completions for ramrods are collected in a synchronous way
     */
    for (i = 0; i < sc->num_queues; i++) {
        if (bxe_stop_queue(sc, i)) {
            goto unload_error;
        }
    }

    /*
     * If SP settings didn't get completed so far - something
     * very wrong has happened.
     */
    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
    }

unload_error:

    rc = bxe_func_stop(sc);
    if (rc) {
        BLOGE(sc, "Function stop failed!(%d)\n", rc);
    }

    /* disable HW interrupts */
    bxe_int_disable_sync(sc, TRUE);

    /* detach interrupts */
    bxe_interrupt_detach(sc);

    /* Reset the chip */
    rc = bxe_reset_hw(sc, reset_code);
    if (rc) {
        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
    }

    /* Report UNLOAD_DONE to MCP */
    bxe_send_unload_done(sc, keep_link);
}

static void
bxe_disable_close_the_gate(struct bxe_softc *sc)
{
    uint32_t val;
    int port = SC_PORT(sc);

    BLOGD(sc, DBG_LOAD,
          "Disabling 'close the gates'\n");

    if (CHIP_IS_E1(sc)) {
        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
        val = REG_RD(sc, addr);
        val &= ~(0x300);
        REG_WR(sc, addr, val);
    } else {
        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
    }
}

/*
 * Cleans objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void
bxe_squeeze_objects(struct bxe_softc *sc)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    struct ecore_mcast_ramrod_params rparam = { NULL };
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    int rc;

    /* Cleanup MACs' object first... */
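    /*
     * RAMROD_COMP_WAIT plus RAMROD_DRV_CLR_ONLY make the delete_all()
     * calls below walk the driver's internal lists and release entries
     * without posting ramrods to the chip (a "dry" cleanup); interrupts
     * are already disabled at this point, so ramrod completions could
     * not be processed anyway.
     */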
    /* wait for completion of the requested commands */
    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

    /* Perform a dry cleanup */
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

    /* Clean ETH primary MAC */
    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
    }

    /* Cleanup UC list */
    vlan_mac_flags = 0;
    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
    }

    /* Now clean mcast object... */

    rparam.mcast_obj = &sc->mcast_obj;
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

    /* Add a DEL command... */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    /* now wait until all pending commands are cleared */

    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    while (rc != 0) {
        if (rc < 0) {
            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
            return;
        }

        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    }
}

/* stop the controller */
static __noinline int
bxe_nic_unload(struct bxe_softc *sc,
               uint32_t         unload_mode,
               uint8_t          keep_link)
{
    uint8_t global = FALSE;
    uint32_t val;
    int i;

    BXE_CORE_LOCK_ASSERT(sc);

    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);

    for (i = 0; i < sc->num_queues; i++) {
        struct bxe_fastpath *fp;

        fp = &sc->fp[i];
        BXE_FP_TX_LOCK(fp);
        BXE_FP_TX_UNLOCK(fp);
    }

    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");

    /* mark driver as unloaded in shmem2 */
    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
    }

    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
        /*
         * We can get here if the driver has been unloaded
         * during parity error recovery and is either waiting for a
         * leader to complete or for other functions to unload and
         * then ifconfig down has been issued. In this case we want to
         * unload and let other functions to complete a recovery
         * process.
         */
        sc->recovery_state = BXE_RECOVERY_DONE;
        sc->is_leader = 0;
        bxe_release_leader_lock(sc);
        mb();

        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
        BLOGE(sc, "Can't unload in closed or error state recovery_state 0x%x"
              " state = 0x%x\n", sc->recovery_state, sc->state);
        return (-1);
    }

    /*
     * Nothing to do during unload if the previous bxe_nic_load()
     * did not complete successfully - all resources are released.
     */
    if ((sc->state == BXE_STATE_CLOSED) || (sc->state == BXE_STATE_ERROR)) {
        return (0);
    }

    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
    mb();

    /* stop tx */
    bxe_tx_disable(sc);

    sc->rx_mode = BXE_RX_MODE_NONE;
    /* XXX set rx mode ??? */
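    /*
     * The driver pulse written below is the periodic heartbeat the
     * driver places in shared memory for the management firmware;
     * setting ALWAYS_ALIVE in the sequence tells the MCP not to treat
     * the missing pulses that follow the unload as a driver failure.
     */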
    if (IS_PF(sc) && !sc->grcdump_done) {
        /* set ALWAYS_ALIVE bit in shmem */
        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

        bxe_drv_pulse(sc);

        bxe_stats_handle(sc, STATS_EVENT_STOP);
        bxe_save_statistics(sc);
    }

    /* wait till consumers catch up with producers in all queues */
    bxe_drain_tx_queues(sc);

    /*
     * If VF, indicate to the PF that this function is going down
     * (the PF will delete sp elements and clear initializations).
     */
    if (IS_VF(sc)) {
        ; /* bxe_vfpf_close_vf(sc); */
    } else if (unload_mode != UNLOAD_RECOVERY) {
        /* if this is a normal/close unload need to clean up chip */
        if (!sc->grcdump_done)
            bxe_chip_cleanup(sc, unload_mode, keep_link);
    } else {
        /* Send the UNLOAD_REQUEST to the MCP */
        bxe_send_unload_req(sc, unload_mode);

        /*
         * Prevent transactions to host from the functions on the
         * engine that doesn't reset global blocks in case of global
         * attention once global blocks are reset and gates are opened
         * (the engine whose leader will perform the recovery last).
         */
        if (!CHIP_IS_E1x(sc)) {
            bxe_pf_disable(sc);
        }

        /* disable HW interrupts */
        bxe_int_disable_sync(sc, TRUE);

        /* detach interrupts */
        bxe_interrupt_detach(sc);

        /* Report UNLOAD_DONE to MCP */
        bxe_send_unload_done(sc, FALSE);
    }

    /*
     * At this stage no more interrupts will arrive so we may safely clean
     * the queue'able objects here in case they failed to get cleaned so far.
     */
    if (IS_PF(sc)) {
        bxe_squeeze_objects(sc);
    }

    /* There should be no more pending SP commands at this stage */
    sc->sp_state = 0;

    sc->port.pmf = 0;

    bxe_free_fp_buffers(sc);

    if (IS_PF(sc)) {
        bxe_free_mem(sc);
    }

    bxe_free_fw_stats_mem(sc);

    sc->state = BXE_STATE_CLOSED;

    /*
     * Check if there are pending parity attentions. If there are - set
     * RECOVERY_IN_PROGRESS.
     */
    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
        bxe_set_reset_in_progress(sc);

        /* Set RESET_IS_GLOBAL if needed */
        if (global) {
            bxe_set_reset_global(sc);
        }
    }

    /*
     * The last driver must disable a "close the gate" if there is no
     * parity attention or "process kill" pending.
     */
    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
        bxe_reset_is_done(sc, SC_PATH(sc))) {
        bxe_disable_close_the_gate(sc);
    }

    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");

    return (0);
}

/*
 * Called by the OS to set various media options (i.e. link, speed, etc.) when
 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
 */
static int
bxe_ifmedia_update(struct ifnet *ifp)
{
    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
    struct ifmedia *ifm;

    ifm = &sc->ifmedia;

    /* We only support Ethernet media type. */
    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
        return (EINVAL);
    }

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    case IFM_10G_CX4:
    case IFM_10G_SR:
    case IFM_10G_T:
    case IFM_10G_TWINAX:
    default:
        /* We don't support changing the media type. */
        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
              IFM_SUBTYPE(ifm->ifm_media));
        return (EINVAL);
    }

    return (0);
}

/*
 * Called by the OS to get the current media status (i.e. link, speed, etc.).
 */
static void
bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct bxe_softc *sc = if_getsoftc(ifp);

    /* Report link down if the driver isn't running. */
    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }

    /* Setup the default interface info.
*/ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (sc->link_vars.link_up) { ifmr->ifm_status |= IFM_ACTIVE; } else { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_active |= sc->media; if (sc->link_vars.duplex == DUPLEX_FULL) { ifmr->ifm_active |= IFM_FDX; } else { ifmr->ifm_active |= IFM_HDX; } } static int bxe_ioctl_nvram(struct bxe_softc *sc, uint32_t priv_op, struct ifreq *ifr) { struct bxe_nvram_data nvdata_base; struct bxe_nvram_data *nvdata; int len; int error = 0; copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); len = (sizeof(struct bxe_nvram_data) + nvdata_base.len - sizeof(uint32_t)); if (len > sizeof(struct bxe_nvram_data)) { if ((nvdata = (struct bxe_nvram_data *) malloc(len, M_DEVBUF, (M_NOWAIT | M_ZERO))) == NULL) { BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed priv_op 0x%x " " len = 0x%x\n", priv_op, len); return (1); } memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); } else { nvdata = &nvdata_base; } if (priv_op == BXE_IOC_RD_NVRAM) { BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", nvdata->offset, nvdata->len); error = bxe_nvram_read(sc, nvdata->offset, (uint8_t *)nvdata->value, nvdata->len); copyout(nvdata, ifr->ifr_data, len); } else { /* BXE_IOC_WR_NVRAM */ BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", nvdata->offset, nvdata->len); copyin(ifr->ifr_data, nvdata, len); error = bxe_nvram_write(sc, nvdata->offset, (uint8_t *)nvdata->value, nvdata->len); } if (len > sizeof(struct bxe_nvram_data)) { free(nvdata, M_DEVBUF); } return (error); } static int bxe_ioctl_stats_show(struct bxe_softc *sc, uint32_t priv_op, struct ifreq *ifr) { const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); caddr_t p_tmp; uint32_t *offset; int i; switch (priv_op) { case BXE_IOC_STATS_SHOW_NUM: memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = BXE_NUM_ETH_STATS; ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = STAT_NAME_LEN; return (0); case BXE_IOC_STATS_SHOW_STR: memset(ifr->ifr_data, 0, str_size); p_tmp = ifr->ifr_data; for (i = 0; i < BXE_NUM_ETH_STATS; i++) { strcpy(p_tmp, bxe_eth_stats_arr[i].string); p_tmp += STAT_NAME_LEN; } return (0); case BXE_IOC_STATS_SHOW_CNT: memset(ifr->ifr_data, 0, stats_size); p_tmp = ifr->ifr_data; for (i = 0; i < BXE_NUM_ETH_STATS; i++) { offset = ((uint32_t *)&sc->eth_stats + bxe_eth_stats_arr[i].offset); switch (bxe_eth_stats_arr[i].size) { case 4: *((uint64_t *)p_tmp) = (uint64_t)*offset; break; case 8: *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); break; default: *((uint64_t *)p_tmp) = 0; } p_tmp += sizeof(uint64_t); } return (0); default: return (-1); } } static void bxe_handle_chip_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; long work = atomic_load_acq_long(&sc->chip_tq_flags); switch (work) { case CHIP_TQ_REINIT: if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { /* restart the interface */ BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } break; default: break; } } /* * Handles any IOCTL calls from the operating system. 
 *
 * Returns:
 *   0 = Success, >0 Failure
 */
static int
bxe_ioctl(if_t ifp,
          u_long command,
          caddr_t data)
{
    struct bxe_softc *sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *)data;
    struct bxe_nvram_data *nvdata;
    uint32_t priv_op;
    int mask = 0;
    int reinit = 0;
    int error = 0;

    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);

    switch (command) {
    case SIOCSIFMTU:
        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
              ifr->ifr_mtu);

        if (sc->mtu == ifr->ifr_mtu) {
            /* nothing to change */
            break;
        }

        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
                  ifr->ifr_mtu, mtu_min, mtu_max);
            error = EINVAL;
            break;
        }

        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
                             (unsigned long)ifr->ifr_mtu);
        /*
        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
                              (unsigned long)ifr->ifr_mtu);
        XXX - Not sure why it needs to be atomic
        */
        if_setmtu(ifp, ifr->ifr_mtu);
        reinit = 1;
        break;

    case SIOCSIFFLAGS:
        /* toggle the interface state up or down */
        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");

        BXE_CORE_LOCK(sc);
        /* check if the interface is up */
        if (if_getflags(ifp) & IFF_UP) {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                /* set the receive mode flags */
                bxe_set_rx_mode(sc);
            } else if (sc->state != BXE_STATE_DISABLED) {
                bxe_init_locked(sc);
            }
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                bxe_periodic_stop(sc);
                bxe_stop_locked(sc);
            }
        }
        BXE_CORE_UNLOCK(sc);

        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* add/delete multicast addresses */
        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");

        /* check if the interface is up */
        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            /* set the receive mode flags */
            BXE_CORE_LOCK(sc);
            bxe_set_rx_mode(sc);
            BXE_CORE_UNLOCK(sc);
        }

        break;

    case SIOCSIFCAP:
        /* find out which capabilities have changed */
        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));

        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
              mask);

        /* toggle the LRO capabilities enable flag */
        if (mask & IFCAP_LRO) {
            if_togglecapenable(ifp, IFCAP_LRO);
            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
            reinit = 1;
        }

        /* toggle the TXCSUM checksum capabilities enable flag */
        if (mask & IFCAP_TXCSUM) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
                if_sethwassistbits(ifp, (CSUM_IP       |
                                         CSUM_TCP      |
                                         CSUM_UDP      |
                                         CSUM_TSO      |
                                         CSUM_TCP_IPV6 |
                                         CSUM_UDP_IPV6), 0);
            } else {
                if_clearhwassist(ifp); /* XXX */
            }
        }

        /* toggle the RXCSUM checksum capabilities enable flag */
        if (mask & IFCAP_RXCSUM) {
            if_togglecapenable(ifp, IFCAP_RXCSUM);
            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
                if_sethwassistbits(ifp, (CSUM_IP       |
                                         CSUM_TCP      |
                                         CSUM_UDP      |
                                         CSUM_TSO      |
                                         CSUM_TCP_IPV6 |
                                         CSUM_UDP_IPV6), 0);
            } else {
                if_clearhwassist(ifp); /* XXX */
            }
        }

        /* toggle TSO4 capabilities enabled flag */
        if (mask & IFCAP_TSO4) {
            if_togglecapenable(ifp, IFCAP_TSO4);
            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
        }

        /* toggle TSO6 capabilities enabled flag */
        if (mask & IFCAP_TSO6) {
            if_togglecapenable(ifp, IFCAP_TSO6);
            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
                  (if_getcapenable(ifp) & IFCAP_TSO6) ?
"ON" : "OFF"); } /* toggle VLAN_HWTSO capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); } /* toggle VLAN_HWCSUM capabilities enabled flag */ if (mask & IFCAP_VLAN_HWCSUM) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); error = EINVAL; } /* toggle VLAN_MTU capabilities enable flag */ if (mask & IFCAP_VLAN_MTU) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWTAGGING capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTAGGING) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWFILTER capabilities enabled flag */ if (mask & IFCAP_VLAN_HWFILTER) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); error = EINVAL; } /* XXX not yet... * IFCAP_WOL_MAGIC */ break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* set/get interface media */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", (command & 0xff)); error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; case SIOCGPRIVATE_0: copyin(ifr->ifr_data, &priv_op, sizeof(priv_op)); switch (priv_op) { case BXE_IOC_RD_NVRAM: case BXE_IOC_WR_NVRAM: nvdata = (struct bxe_nvram_data *)ifr->ifr_data; BLOGD(sc, DBG_IOCTL, "Received Private NVRAM ioctl addr=0x%x size=%u\n", nvdata->offset, nvdata->len); error = bxe_ioctl_nvram(sc, priv_op, ifr); break; case BXE_IOC_STATS_SHOW_NUM: case BXE_IOC_STATS_SHOW_STR: case BXE_IOC_STATS_SHOW_CNT: BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n", priv_op); error = bxe_ioctl_stats_show(sc, priv_op, ifr); break; default: BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op); error = EINVAL; break; } break; default: BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", (command & 0xff)); error = ether_ioctl(ifp, command, data); break; } if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { BLOGD(sc, DBG_LOAD | DBG_IOCTL, "Re-initializing hardware from IOCTL change\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } return (error); } static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents) { char * type; int i = 0; if (!(sc->debug & DBG_MBUF)) { return; } if (m == NULL) { BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); return; } while (m) { #if __FreeBSD_version >= 1000000 BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, (int)m->m_pkthdr.csum_flags, CSUM_BITS); } #else BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG" "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" "\22M_PROMISC\23M_NOFREE", (int)m->m_pkthdr.csum_flags, "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" "\12CSUM_IP_VALID\13CSUM_DATA_VALID" "\14CSUM_PSEUDO_HDR"); } #endif /* #if __FreeBSD_version >= 
1000000 */

        if (m->m_flags & M_EXT) {
            switch (m->m_ext.ext_type) {
            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
            case EXT_PACKET:     type = "EXT_PACKET";     break;
            case EXT_MBUF:       type = "EXT_MBUF";       break;
            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
            default:             type = "UNKNOWN";        break;
            }

            BLOGD(sc, DBG_MBUF,
                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
        }

        if (contents) {
            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
        }

        m = m->m_next;
        i++;
    }
}

/*
 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
 * The window: the 3 reserved bds are 1 for the headers BD plus 2 for
 * the parse BD and last BD.
 * The header comes in a separate bd in FreeBSD, so 13 - 3 = 10.
 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
 */
static int
bxe_chktso_window(struct bxe_softc  *sc,
                  int               nsegs,
                  bus_dma_segment_t *segs,
                  struct mbuf       *m)
{
    uint32_t num_wnds, wnd_size, wnd_sum;
    int32_t frag_idx, wnd_idx;
    unsigned short lso_mss;
    int defrag;

    defrag = 0;
    wnd_sum = 0;
    wnd_size = 10;
    num_wnds = nsegs - wnd_size;
    lso_mss = htole16(m->m_pkthdr.tso_segsz);

    /*
     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
     * first window sum of data while skipping the first assuming it is the
     * header in FreeBSD.
     */
    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
        wnd_sum += htole16(segs[frag_idx].ds_len);
    }

    /* check the first 10 bd window size */
    if (wnd_sum < lso_mss) {
        return (1);
    }

    /* run through the windows */
    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
        /* subtract the first mbuf->m_len of the last wndw(-header) */
        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
        /* add the next mbuf len to the len of our new window */
        wnd_sum += htole16(segs[frag_idx].ds_len);
        if (wnd_sum < lso_mss) {
            return (1);
        }
    }

    return (0);
}

static uint8_t
bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
                    struct mbuf         *m,
                    uint32_t            *parsing_data)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip4 = NULL;
    struct ip6_hdr *ip6 = NULL;
    caddr_t ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen, ip_hlen, l4_off;
    uint16_t proto;

    if (m->m_pkthdr.csum_flags == CSUM_IP) {
        /* no L4 checksum offload needed */
        return (0);
    }

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        proto = ntohs(eh->evl_proto);
    } else {
        e_hlen = ETHER_HDR_LEN;
        proto = ntohs(eh->evl_encap_proto);
    }

    switch (proto) {
    case ETHERTYPE_IP:
        /* get the IP header, if mbuf len < 20 then header in next mbuf */
        ip4 = (m->m_len < sizeof(struct ip)) ?
                  (struct ip *)m->m_next->m_data :
                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is number of 32-bit words */
        ip_hlen = (ip4->ip_hl << 2);
        ip = (caddr_t)ip4;
        break;
    case ETHERTYPE_IPV6:
        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
                  (struct ip6_hdr *)m->m_next->m_data :
                  (struct ip6_hdr *)(m->m_data + e_hlen);
        /* XXX cannot support offload with IPv6 extensions */
        ip_hlen = sizeof(struct ip6_hdr);
        ip = (caddr_t)ip6;
        break;
    default:
        /* We can't offload in this case... */
        /* XXX error stat ???
*/ return (0); } /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ l4_off = (e_hlen + ip_hlen); *parsing_data |= (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; th = (struct tcphdr *)(ip + ip_hlen); /* th_off is number of 32-bit words */ *parsing_data |= ((th->th_off << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); return (l4_off + (th->th_off << 2)); /* entire header length */ } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; return (l4_off + sizeof(struct udphdr)); /* entire header length */ } else { /* XXX error stat ??? */ return (0); } } static uint8_t bxe_set_pbd_csum(struct bxe_fastpath *fp, struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; struct udphdr *uh = NULL; int e_hlen, ip_hlen; uint16_t proto; uint8_t hlen; uint16_t tmp_csum; uint32_t *tmp_uh; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 1); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = (sizeof(struct ip6_hdr) >> 1); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } hlen = (e_hlen >> 1); /* note that rest of global_data is indirectly zeroed here */ if (m->m_flags & M_VLANTAG) { pbd->global_data = htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); } else { pbd->global_data = htole16(hlen); } pbd->ip_hlen_w = ip_hlen; hlen += pbd->ip_hlen_w; /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { th = (struct tcphdr *)(ip + (ip_hlen << 1)); /* th_off is number of 32-bit words */ hlen += (uint16_t)(th->th_off << 1); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { uh = (struct udphdr *)(ip + (ip_hlen << 1)); hlen += (sizeof(struct udphdr) / 2); } else { /* valid case as only CSUM_IP was set */ return (0); } pbd->total_hlen_w = htole16(hlen); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; pbd->tcp_pseudo_csum = ntohs(th->th_sum); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; /* * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP * checksums and does not know anything about the UDP header and where * the checksum field is located. It only knows about TCP. 
 * Therefore
 * we "lie" to the hardware for outgoing UDP packets w/ checksum
 * offload. Since the checksum field offset for TCP is 16 bytes and
 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
 * bytes less than the start of the UDP header. This allows the
 * hardware to write the checksum in the correct spot. But the
 * hardware will compute a checksum which includes the last 10 bytes
 * of the IP header. To correct this we tweak the stack computed
 * pseudo checksum by folding in the calculation of the inverse
 * checksum for those final 10 bytes of the IP header. This allows
 * the correct checksum to be computed by the hardware.
 */

        /* set pointer 10 bytes before UDP header */
        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);

        /* calculate a pseudo header checksum over the first 10 bytes */
        tmp_csum = in_pseudo(*tmp_uh,
                             *(tmp_uh + 1),
                             *(uint16_t *)(tmp_uh + 2));

        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
    }

    return (hlen * 2); /* entire header length, number of bytes */
}

static void
bxe_set_pbd_lso_e2(struct mbuf *m,
                   uint32_t    *parsing_data)
{
    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
                      ETH_TX_PARSE_BD_E2_LSO_MSS);

    /* XXX test for IPv6 with extension header... */
}

static void
bxe_set_pbd_lso(struct mbuf                *m,
                struct eth_tx_parse_bd_e1x *pbd)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen;

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;

    /* get the IP and TCP header, with LSO entire header in first mbuf */
    /* XXX assuming IPv4 */
    ip = (struct ip *)(m->m_data + e_hlen);
    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
    pbd->tcp_send_seq = ntohl(th->th_seq);
    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);

#if 1
    /* XXX IPv4 */
    pbd->ip_id = ntohs(ip->ip_id);
    pbd->tcp_pseudo_csum =
        ntohs(in_pseudo(ip->ip_src.s_addr,
                        ip->ip_dst.s_addr,
                        htons(IPPROTO_TCP)));
#else
    /* XXX IPv6 */
    pbd->tcp_pseudo_csum =
        ntohs(in_pseudo(&ip6->ip6_src,
                        &ip6->ip6_dst,
                        htons(IPPROTO_TCP)));
#endif

    pbd->global_data |=
        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
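/*
 * Illustrative sketch (not from the driver): the sliding-window test
 * implemented by bxe_chktso_window() above, reduced to plain C. The
 * function name and parameters are hypothetical; it returns 1 when
 * some 10-segment window of payload sums to less than the MSS, i.e.
 * when the frame would need further defragmentation before TSO.
 */
#if 0 /* illustration only, not compiled */
static int
tso_window_too_small(const unsigned int *seg_len, /* per-segment bytes */
                     int nsegs,                   /* total segments */
                     unsigned int mss)
{
    const int wnd_size = 10; /* 13 BDs - start/parse/last = 10 */
    unsigned int wnd_sum = 0;
    int i;

    /* seed the window with segments 1..10 (segment 0 holds headers) */
    for (i = 1; i <= wnd_size && i < nsegs; i++)
        wnd_sum += seg_len[i];
    if (wnd_sum < mss)
        return (1);

    /* slide: drop the oldest segment, add the next one */
    for (i = wnd_size + 1; i < nsegs; i++) {
        wnd_sum += seg_len[i] - seg_len[i - wnd_size];
        if (wnd_sum < mss)
            return (1);
    }

    return (0);
}
#endif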
/*
 * Encapsulate an mbuf cluster into the tx bd chain and make the memory
 * visible to the controller.
 *
 * If an mbuf is submitted to this routine and cannot be given to the
 * controller (e.g. it has too many fragments) then the function may free
 * the mbuf and return to the caller.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 *   Note the side effect that an mbuf may be freed if it causes a problem.
 */
static int
bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
{
    bus_dma_segment_t segs[32];
    struct mbuf *m0;
    struct bxe_sw_tx_bd *tx_buf;
    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
    struct eth_tx_bd *tx_data_bd;
    struct eth_tx_bd *tx_total_pkt_size_bd;
    struct eth_tx_start_bd *tx_start_bd;
    uint16_t bd_prod, pkt_prod, total_pkt_size;
    uint8_t mac_type;
    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
    struct bxe_softc *sc;
    uint16_t tx_bd_avail;
    struct ether_vlan_header *eh;
    uint32_t pbd_e2_parsing_data = 0;
    uint8_t hlen = 0;
    int tmp_bd;
    int i;

    sc = fp->sc;

#if __FreeBSD_version >= 800000
    M_ASSERTPKTHDR(*m_head);
#endif /* #if __FreeBSD_version >= 800000 */

    m0 = *m_head;
    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
    tx_start_bd = NULL;
    tx_data_bd = NULL;
    tx_total_pkt_size_bd = NULL;

    /* get the H/W pointer for packets and BDs */
    pkt_prod = fp->tx_pkt_prod;
    bd_prod = fp->tx_bd_prod;

    mac_type = UNICAST_ADDRESS;

    /* map the mbuf into the next open DMAable memory */
    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map,
                                    m0, segs, &nsegs, BUS_DMA_NOWAIT);

    /* mapping errors */
    if (__predict_false(error != 0)) {
        fp->eth_q_stats.tx_dma_mapping_failure++;
        if (error == ENOMEM) {
            /* resource issue, try again later */
            rc = ENOMEM;
        } else if (error == EFBIG) {
            /* possibly recoverable with defragmentation */
            fp->eth_q_stats.mbuf_defrag_attempts++;
            m0 = m_defrag(*m_head, M_NOWAIT);
            if (m0 == NULL) {
                fp->eth_q_stats.mbuf_defrag_failures++;
                rc = ENOBUFS;
            } else {
                /* defrag successful, try mapping again */
                *m_head = m0;
                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
                                                tx_buf->m_map, m0,
                                                segs, &nsegs,
                                                BUS_DMA_NOWAIT);
                if (error) {
                    fp->eth_q_stats.tx_dma_mapping_failure++;
                    rc = error;
                }
            }
        } else {
            /* unknown, unrecoverable mapping error */
            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
            bxe_dump_mbuf(sc, m0, FALSE);
            rc = error;
        }

        goto bxe_tx_encap_continue;
    }

    tx_bd_avail = bxe_tx_avail(sc, fp);

    /* make sure there is enough room in the send queue */
    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
        /* Recoverable, try again later. */
        fp->eth_q_stats.tx_hw_queue_full++;
        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
        rc = ENOMEM;
        goto bxe_tx_encap_continue;
    }

    /* capture the current H/W TX chain high watermark */
    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
                        (TX_BD_USABLE - tx_bd_avail))) {
        fp->eth_q_stats.tx_hw_max_queue_depth =
            (TX_BD_USABLE - tx_bd_avail);
    }

    /* make sure it fits in the packet window */
    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
        /*
         * The mbuf may be too big for the controller to handle. If the
         * frame is a TSO frame we'll need to do an additional check.
         */
        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
                goto bxe_tx_encap_continue; /* OK to send */
            } else {
                fp->eth_q_stats.tx_window_violation_tso++;
            }
        } else {
            fp->eth_q_stats.tx_window_violation_std++;
        }

        /* let's try to defragment this mbuf and remap it */
        fp->eth_q_stats.mbuf_defrag_attempts++;
        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);

        m0 = m_defrag(*m_head, M_NOWAIT);
        if (m0 == NULL) {
            fp->eth_q_stats.mbuf_defrag_failures++;
            /* Ugh, just drop the frame...
:( */ rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; /* No sense in trying to defrag/copy chain, drop it. :( */ rc = error; } else { /* if the chain is still too long then drop it */ if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); rc = ENODEV; } } } } bxe_tx_encap_continue: /* Check for errors */ if (rc) { if (rc == ENOMEM) { /* recoverable try again later */ } else { fp->eth_q_stats.tx_soft_errors++; fp->eth_q_stats.mbuf_alloc_tx--; m_freem(*m_head); *m_head = NULL; } return (rc); } /* set flag according to packet type (UNICAST_ADDRESS is default) */ if (m0->m_flags & M_BCAST) { mac_type = BROADCAST_ADDRESS; } else if (m0->m_flags & M_MCAST) { mac_type = MULTICAST_ADDRESS; } /* store the mbuf into the mbuf ring */ tx_buf->m = m0; tx_buf->first_bd = fp->tx_bd_prod; tx_buf->flags = 0; /* prepare the first transmit (start) BD for the mbuf */ tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; BLOGD(sc, DBG_TX, "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); tx_start_bd->nbytes = htole16(segs[0].ds_len); total_pkt_size += tx_start_bd->nbytes; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); /* all frames have at least Start BD + Parsing BD */ nbds = nsegs + 1; tx_start_bd->nbd = htole16(nbds); if (m0->m_flags & M_VLANTAG) { tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); } else { /* vf tx, start bd must hold the ethertype for fw to enforce it */ if (IS_VF(sc)) { /* map ethernet header to find type and header length */ eh = mtod(m0, struct ether_vlan_header *); tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); } } /* * add a parsing BD from the chain. 
The parsing BD is always added * though it is only used for TSO and chksum */ bd_prod = TX_BD_NEXT(bd_prod); if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_IP) { fp->eth_q_stats.tx_ofld_frames_csum_ip++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_L4_CSUM); } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_IS_UDP | ETH_TX_BD_FLAGS_L4_CSUM); } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | ETH_TX_BD_FLAGS_IS_UDP); } } if (!CHIP_IS_E1x(sc)) { pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); } SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { uint16_t global_data = 0; pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); } SET_FLAG(global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); pbd_e1x->global_data |= htole16(global_data); } /* setup the parsing BD with TSO specific info */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { fp->eth_q_stats.tx_ofld_frames_lso++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; if (__predict_false(tx_start_bd->nbytes > hlen)) { fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; /* split the first BD into header/data making the fw job easy */ nbds++; tx_start_bd->nbd = htole16(nbds); tx_start_bd->nbytes = htole16(hlen); bd_prod = TX_BD_NEXT(bd_prod); /* new transmit BD after the tx_parse_bd */ tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } BLOGD(sc, DBG_TX, "TSO split header size is %d (%x:%x) nbds %d\n", le16toh(tx_start_bd->nbytes), le32toh(tx_start_bd->addr_hi), le32toh(tx_start_bd->addr_lo), nbds); } if (!CHIP_IS_E1x(sc)) { bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); } else { bxe_set_pbd_lso(m0, pbd_e1x); } } if (pbd_e2_parsing_data) { pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); } /* prepare remaining BDs, start tx bd contains first seg/frag */ for (i = 1; i < nsegs ; i++) { bd_prod = TX_BD_NEXT(bd_prod); tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); tx_data_bd->nbytes = htole16(segs[i].ds_len); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } total_pkt_size += tx_data_bd->nbytes; } BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); if (tx_total_pkt_size_bd != NULL) { tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; } if (__predict_false(sc->debug & DBG_TX)) { tmp_bd = tx_buf->first_bd; for (i = 0; i < nbds; i++) { if (i == 0) { BLOGD(sc, DBG_TX, "TX Strt: %p bd=%d nbd=%d vlan=0x%x " "bd_flags=0x%x hdr_nbds=%d\n", tx_start_bd, tmp_bd, le16toh(tx_start_bd->nbd), 
le16toh(tx_start_bd->vlan_or_ethertype), tx_start_bd->bd_flags.as_bitfield, (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); } else if (i == 1) { if (pbd_e1x) { BLOGD(sc, DBG_TX, "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " "tcp_seq=%u total_hlen_w=%u\n", pbd_e1x, tmp_bd, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, le16toh(pbd_e1x->total_hlen_w)); } else { /* if (pbd_e2) */ BLOGD(sc, DBG_TX, "-> Parse: %p bd=%d dst=%02x:%02x:%02x " "src=%02x:%02x:%02x parsing_data=0x%x\n", pbd_e2, tmp_bd, pbd_e2->data.mac_addr.dst_hi, pbd_e2->data.mac_addr.dst_mid, pbd_e2->data.mac_addr.dst_lo, pbd_e2->data.mac_addr.src_hi, pbd_e2->data.mac_addr.src_mid, pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); } } if (i != 1) { /* skip parse db as it doesn't hold data */ tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; BLOGD(sc, DBG_TX, "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", tx_data_bd, tmp_bd, le16toh(tx_data_bd->nbytes), le32toh(tx_data_bd->addr_hi), le32toh(tx_data_bd->addr_lo)); } tmp_bd = TX_BD_NEXT(tmp_bd); } } BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); /* update TX BD producer index value for next TX */ bd_prod = TX_BD_NEXT(bd_prod); /* * If the chain of tx_bd's describing this frame is adjacent to or spans * an eth_tx_next_bd element then we need to increment the nbds value. */ if (TX_BD_IDX(bd_prod) < nbds) { nbds++; } /* don't allow reordering of writes for nbd and packets */ mb(); fp->tx_db.data.prod += nbds; /* producer points to the next free tx_bd at this point */ fp->tx_pkt_prod++; fp->tx_bd_prod = bd_prod; DOORBELL(sc, fp->index, fp->tx_db.raw); fp->eth_q_stats.tx_pkts++; /* Prevent speculative reads from getting ahead of the status block. */ bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_READ); /* Prevent speculative reads from getting ahead of the doorbell. */ bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 0, 0, BUS_SPACE_BARRIER_READ); return (0); } static void bxe_tx_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp) { struct mbuf *m = NULL; int tx_count = 0; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); /* keep adding entries while there are frames to send */ while (!if_sendq_empty(ifp)) { /* * check for any frames to send * dequeue can still be NULL even if queue is not empty */ m = if_dequeue(ifp); if (__predict_false(m == NULL)) { break; } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */ if (__predict_false(bxe_tx_encap(fp, &m))) { fp->eth_q_stats.tx_encap_failures++; if (m != NULL) { /* mark the TX queue as full and return the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_queue_xoff++; } /* stop looking for more work */ break; } /* the frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners. 
*/ if_etherbpfmtap(ifp, m); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } } /* Legacy (non-RSS) dispatch routine */ static void bxe_tx_start(if_t ifp) { struct bxe_softc *sc; struct bxe_fastpath *fp; sc = if_getsoftc(ifp); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BLOGW(sc, "Interface not running, ignoring transmit request\n"); return; } if (!sc->link_vars.link_up) { BLOGW(sc, "Interface link is down, ignoring transmit request\n"); return; } fp = &sc->fp[0]; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { fp->eth_q_stats.tx_queue_full_return++; return; } BXE_FP_TX_LOCK(fp); bxe_tx_start_locked(sc, ifp, fp); BXE_FP_TX_UNLOCK(fp); } #if __FreeBSD_version >= 800000 static int bxe_tx_mq_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp, struct mbuf *m) { struct buf_ring *tx_br = fp->tx_br; struct mbuf *next; int depth, rc, tx_count; uint16_t tx_bd_avail; rc = tx_count = 0; BXE_FP_TX_LOCK_ASSERT(fp); if (!tx_br) { BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); return (EINVAL); } if (!sc->link_vars.link_up || (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { rc = drbr_enqueue(ifp, tx_br, m); goto bxe_tx_mq_start_locked_exit; } /* fetch the depth of the driver queue */ depth = drbr_inuse_drv(ifp, tx_br); if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { fp->eth_q_stats.tx_max_drbr_queue_depth = depth; } if (m == NULL) { /* no new work, check for pending frames */ next = drbr_dequeue_drv(ifp, tx_br); } else if (drbr_needs_enqueue_drv(ifp, tx_br)) { /* have both new and pending work, maintain packet order */ rc = drbr_enqueue(ifp, tx_br, m); if (rc != 0) { fp->eth_q_stats.tx_soft_errors++; goto bxe_tx_mq_start_locked_exit; } next = drbr_dequeue_drv(ifp, tx_br); } else { /* new work only and nothing pending */ next = m; } /* keep adding entries while there are frames to send */ while (next != NULL) { /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. 
*/ rc = bxe_tx_encap(fp, &next); if (__predict_false(rc != 0)) { fp->eth_q_stats.tx_encap_failures++; if (next != NULL) { /* mark the TX queue as full and save the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); /* XXX this may reorder the frame */ rc = drbr_enqueue(ifp, tx_br, next); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_frames_deferred++; } /* stop looking for more work */ break; } /* the transmit frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners */ if_etherbpfmtap(ifp, next); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } next = drbr_dequeue_drv(ifp, tx_br); } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } bxe_tx_mq_start_locked_exit: return (rc); } /* Multiqueue (TSS) dispatch routine. */ static int bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; int fp_index, rc; fp_index = 0; /* default is the first queue */ /* check if flowid is set */ if (BXE_VALID_FLOWID(m)) fp_index = (m->m_pkthdr.flowid % sc->num_queues); fp = &sc->fp[fp_index]; if (BXE_FP_TX_TRYLOCK(fp)) { rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); BXE_FP_TX_UNLOCK(fp); } else rc = drbr_enqueue(ifp, fp->tx_br, m); return (rc); } static void bxe_mq_flush(struct ifnet *ifp) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; struct mbuf *m; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->state != BXE_FP_STATE_OPEN) { BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", fp->index, fp->state); continue; } if (fp->tx_br != NULL) { BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { m_freem(m); } BXE_FP_TX_UNLOCK(fp); } } if_qflush(ifp); } #endif /* FreeBSD_version >= 800000 */ static uint16_t bxe_cid_ilt_lines(struct bxe_softc *sc) { if (IS_SRIOV(sc)) { return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); } return (L2_ILT_LINES(sc)); } static void bxe_ilt_set_info(struct bxe_softc *sc) { struct ilt_client_info *ilt_client; struct ecore_ilt *ilt = sc->ilt; uint16_t line = 0; ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); /* CDU */ ilt_client = &ilt->clients[ILT_CLIENT_CDU]; ilt_client->client_num = ILT_CLIENT_CDU; ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bxe_cid_ilt_lines(sc); if (CNIC_SUPPORT(sc)) { line += CNIC_ILT_LINES; } ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[CDU]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* QM */ if (QM_INIT(sc->qm_cid_count)) { ilt_client = &ilt->clients[ILT_CLIENT_QM]; ilt_client->client_num = ILT_CLIENT_QM; ilt_client->page_size = QM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; /* 4 bytes for each cid */ line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ); ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[QM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz 
%d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } if (CNIC_SUPPORT(sc)) { /* SRC */ ilt_client = &ilt->clients[ILT_CLIENT_SRC]; ilt_client->client_num = ILT_CLIENT_SRC; ilt_client->page_size = SRC_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += SRC_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[SRC]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* TM */ ilt_client = &ilt->clients[ILT_CLIENT_TM]; ilt_client->client_num = ILT_CLIENT_TM; ilt_client->page_size = TM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += TM_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[TM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); } static void bxe_set_fp_rx_buf_size(struct bxe_softc *sc) { int i; uint32_t rx_buf_size; rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); for (i = 0; i < sc->num_queues; i++) { if(rx_buf_size <= MCLBYTES){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= MJUMPAGESIZE){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ sc->fp[i].rx_buf_size = MJUMPAGESIZE; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else { sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; } } } static int bxe_alloc_ilt_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt = (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static int bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt->lines = (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static void bxe_free_ilt_mem(struct bxe_softc *sc) { if (sc->ilt != NULL) { free(sc->ilt, M_BXE_ILT); sc->ilt = NULL; } } static void bxe_free_ilt_lines_mem(struct bxe_softc *sc) { if (sc->ilt->lines != NULL) { free(sc->ilt->lines, M_BXE_ILT); sc->ilt->lines = NULL; } } static void bxe_free_mem(struct bxe_softc *sc) { int i; for (i = 0; i < L2_ILT_LINES(sc); i++) { bxe_dma_free(sc, &sc->context[i].vcxt_dma); sc->context[i].vcxt = NULL; sc->context[i].size = 0; } ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); bxe_free_ilt_lines_mem(sc); } static int bxe_alloc_mem(struct bxe_softc *sc) { int context_size; int allocated; int i; /* * Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT * functions because CDU differs in few aspects: * 1. There can be multiple entities allocating memory for context - * regular L2, CNIC, and SRIOV drivers. Each separately controls * its own ILT lines. * 2. Since CDU page-size is not a single 4KB page (which is the case * for the other ILT clients), to be efficient we want to support * allocation of sub-page-size in the last entry. * 3. 
     *    the context (for the other ILT clients the pointers are used
     *    just to free the memory during unload).
     */
    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));

    for (i = 0, allocated = 0; allocated < context_size; i++) {
        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
                                  (context_size - allocated));

        if (bxe_dma_alloc(sc, sc->context[i].size,
                          &sc->context[i].vcxt_dma,
                          "cdu context") != 0) {
            bxe_free_mem(sc);
            return (-1);
        }

        sc->context[i].vcxt =
            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;

        allocated += sc->context[i].size;
    }

    bxe_alloc_ilt_lines_mem(sc);

    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
          sc->ilt, sc->ilt->start_line, sc->ilt->lines);

    {
        for (i = 0; i < 4; i++) {
            BLOGD(sc, DBG_LOAD,
                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
                  i,
                  sc->ilt->clients[i].page_size,
                  sc->ilt->clients[i].start,
                  sc->ilt->clients[i].end,
                  sc->ilt->clients[i].client_num,
                  sc->ilt->clients[i].flags);
        }
    }

    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
        bxe_free_mem(sc);
        return (-1);
    }

    return (0);
}

static void
bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i;

    sc = fp->sc;

    if (fp->rx_mbuf_tag == NULL) {
        return;
    }

    /* free all mbufs and unload all maps */
    for (i = 0; i < RX_BD_TOTAL; i++) {
        if (fp->rx_mbuf_chain[i].m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag,
                            fp->rx_mbuf_chain[i].m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag,
                              fp->rx_mbuf_chain[i].m_map);
        }

        if (fp->rx_mbuf_chain[i].m != NULL) {
            m_freem(fp->rx_mbuf_chain[i].m);
            fp->rx_mbuf_chain[i].m = NULL;
            fp->eth_q_stats.mbuf_alloc_rx--;
        }
    }
}

static void
bxe_free_tpa_pool(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i, max_agg_queues;

    sc = fp->sc;

    if (fp->rx_mbuf_tag == NULL) {
        return;
    }

    max_agg_queues = MAX_AGG_QS(sc);

    /* release all mbufs and unload all DMA maps in the TPA pool */
    for (i = 0; i < max_agg_queues; i++) {
        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag,
                            fp->rx_tpa_info[i].bd.m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag,
                              fp->rx_tpa_info[i].bd.m_map);
        }

        if (fp->rx_tpa_info[i].bd.m != NULL) {
            m_freem(fp->rx_tpa_info[i].bd.m);
            fp->rx_tpa_info[i].bd.m = NULL;
            fp->eth_q_stats.mbuf_alloc_tpa--;
        }
    }
}

static void
bxe_free_sge_chain(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc;
    int i;

    sc = fp->sc;

    if (fp->rx_sge_mbuf_tag == NULL) {
        return;
    }

    /* free all mbufs and unload all maps */
    for (i = 0; i < RX_SGE_TOTAL; i++) {
        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
                            fp->rx_sge_mbuf_chain[i].m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                              fp->rx_sge_mbuf_chain[i].m_map);
        }

        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
            m_freem(fp->rx_sge_mbuf_chain[i].m);
            fp->rx_sge_mbuf_chain[i].m = NULL;
            fp->eth_q_stats.mbuf_alloc_sge--;
        }
    }
}

static void
bxe_free_fp_buffers(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

#if __FreeBSD_version >= 800000
        if (fp->tx_br != NULL) {
            /* just in case bxe_mq_flush() wasn't called */
            if (mtx_initialized(&fp->tx_mtx)) {
                struct mbuf *m;

                BXE_FP_TX_LOCK(fp);
                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
                    m_freem(m);
                BXE_FP_TX_UNLOCK(fp);
            }
        }
#endif

        /* free all RX buffers */
        bxe_free_rx_bd_chain(fp);
        bxe_free_tpa_pool(fp);
        bxe_free_sge_chain(fp);

        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_rx);
        }

        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_sge);
        }

        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_tpa);
        }

        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
                  fp->eth_q_stats.mbuf_alloc_tx);
        }

        /* XXX verify all mbufs were reclaimed */
    }
}

static int
bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                     uint16_t            prev_index,
                     uint16_t            index)
{
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs, rc;

    rc = 0;

    /* allocate the new RX BD mbuf */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_rx++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /* map the mbuf into non-paged pool */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
        m_freem(m);
        fp->eth_q_stats.mbuf_alloc_rx--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    /* release any existing RX BD mbuf mappings */

    if (prev_index != index) {
        rx_buf = &fp->rx_mbuf_chain[prev_index];

        if (rx_buf->m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
        }

        /*
         * We only get here from bxe_rxeof() when the maximum number
         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
         * holds the mbuf in the prev_index so it's OK to NULL it out
         * here without concern of a memory leak.
         */
        fp->rx_mbuf_chain[prev_index].m = NULL;
    }

    rx_buf = &fp->rx_mbuf_chain[index];

    if (rx_buf->m_map != NULL) {
        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
    }

    /* save the mbuf and mapping info for a future packet */
    map = (prev_index != index) ?
fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; rx_buf->m_map = fp->rx_mbuf_spare_map; fp->rx_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_PREREAD); rx_buf->m = m; rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue) { struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate the new TPA mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_tpa++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; m_free(m); fp->eth_q_stats.mbuf_alloc_tpa--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing TPA mbuf mapping */ if (tpa_info->bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); } /* save the mbuf and mapping info for the TPA mbuf */ map = tpa_info->bd.m_map; tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; fp->rx_tpa_info_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_PREREAD); tpa_info->bd.m = m; tpa_info->seg = segs[0]; return (rc); } /* * Allocate an mbuf and assign it to the receive scatter gather chain. The * caller must take care to save a copy of the existing mbuf in the SG mbuf * chain. 
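 *
 * All three RX allocation paths (BD, TPA, SGE) follow the same spare-map
 * pattern, sketched here for reference (this mirrors the code, it is not
 * an alternative implementation):
 *
 *   1. load the new mbuf into the ring's dedicated spare DMA map;
 *   2. sync/unload the map currently held by the target ring slot;
 *   3. swap: the slot takes the spare map and the old map becomes the
 *      new spare.
 *
 * Because the new mbuf is mapped before anything is torn down, a mapping
 * failure leaves the previously posted buffer intact.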
*/ static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index) { struct bxe_sw_rx_bd *sge_buf; struct eth_rx_sge *sge; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate a new SGE mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; return (ENOMEM); } fp->eth_q_stats.mbuf_alloc_sge++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; /* map the SGE mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_sge--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); sge_buf = &fp->rx_sge_mbuf_chain[index]; /* release any existing SGE mbuf mapping */ if (sge_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = sge_buf->m_map; sge_buf->m_map = fp->rx_sge_mbuf_spare_map; fp->rx_sge_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_PREREAD); sge_buf->m = m; sge = &fp->rx_sge_chain[index]; sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static __noinline int bxe_alloc_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, j, rc = 0; int ring_prod, cqe_ring_prod; int max_agg_queues; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; ring_prod = cqe_ring_prod = 0; fp->rx_bd_cons = 0; fp->rx_cq_cons = 0; /* allocate buffers for the RX BDs in RX BD chain */ for (j = 0; j < sc->max_rx_bufs; j++) { rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", i, rc); goto bxe_alloc_fp_buffers_error; } ring_prod = RX_BD_NEXT(ring_prod); cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); } fp->rx_bd_prod = ring_prod; fp->rx_cq_prod = cqe_ring_prod; fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; max_agg_queues = MAX_AGG_QS(sc); fp->tpa_enable = TRUE; /* fill the TPA pool */ for (j = 0; j < max_agg_queues; j++) { rc = bxe_alloc_rx_tpa_mbuf(fp, j); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", i, j); fp->tpa_enable = FALSE; goto bxe_alloc_fp_buffers_error; } fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; } if (fp->tpa_enable) { /* fill the RX SGE chain */ ring_prod = 0; for (j = 0; j < RX_SGE_USABLE; j++) { rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", i, ring_prod); fp->tpa_enable = FALSE; ring_prod = 0; goto bxe_alloc_fp_buffers_error; } ring_prod = RX_SGE_NEXT(ring_prod); } fp->rx_sge_prod = ring_prod; } } return (0); bxe_alloc_fp_buffers_error: /* unwind what was already allocated */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); return (ENOBUFS); } static void bxe_free_fw_stats_mem(struct bxe_softc *sc) { bxe_dma_free(sc, &sc->fw_stats_dma); sc->fw_stats_num = 0; sc->fw_stats_req_size = 0; sc->fw_stats_req = NULL; sc->fw_stats_req_mapping = 0; sc->fw_stats_data_size = 0; sc->fw_stats_data = NULL; sc->fw_stats_data_mapping = 0; } static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc) { uint8_t 
num_queue_stats;
    int num_groups;

    /* number of queues for statistics is number of eth queues */
    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);

    /*
     * Total number of FW statistics requests =
     *   1 for port stats + 1 for PF stats + num of queues
     */
    sc->fw_stats_num = (2 + num_queue_stats);

    /*
     * The request is built from a stats_query_header and an array of
     * stats_query_cmd_group entries, each of which contains
     * STATS_QUERY_CMD_COUNT rules. The real number of requests is
     * configured in the stats_query_header.
     */
    num_groups =
        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));

    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
          sc->fw_stats_num, num_groups);

    sc->fw_stats_req_size =
        (sizeof(struct stats_query_header) +
         (num_groups * sizeof(struct stats_query_cmd_group)));

    /*
     * Data for statistics requests + stats_counter.
     * stats_counter holds per-STORM counters that are incremented when
     * STORM has finished with the current request. Memory for FCoE
     * offloaded statistics is counted anyway, even if it will not be sent.
     * VF stats are not accounted for here as the data of VF stats is stored
     * in memory allocated by the VF, not here.
     */
    sc->fw_stats_data_size =
        (sizeof(struct stats_counter) +
         sizeof(struct per_port_stats) +
         sizeof(struct per_pf_stats) +
         /* sizeof(struct fcoe_statistics_params) + */
         (sizeof(struct per_queue_stats) * num_queue_stats));

    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
                      &sc->fw_stats_dma, "fw stats") != 0) {
        bxe_free_fw_stats_mem(sc);
        return (-1);
    }

    /* set up the shortcuts */

    sc->fw_stats_req =
        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;

    sc->fw_stats_data =
        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
                                     sc->fw_stats_req_size);
    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
                                 sc->fw_stats_req_size);

    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
          (uintmax_t)sc->fw_stats_req_mapping);

    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
          (uintmax_t)sc->fw_stats_data_mapping);

    return (0);
}

/*
 * Bits map:
 * 0-7  - Engine0 load counter.
 * 8-15 - Engine1 load counter.
 * 16   - Engine0 RESET_IN_PROGRESS bit.
 * 17   - Engine1 RESET_IN_PROGRESS bit.
 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
 *        function on the engine
 * 19   - Engine1 ONE_IS_LOADED.
 * 20   - Chip reset flow bit. When set, a non-leader must wait for both
 *        engine leaders to complete (check for both RESET_IN_PROGRESS
 *        bits, not just the one belonging to its engine).
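 *
 * Illustrative decode (hypothetical register value, not taken from the
 * code below):
 *   val = 0x00010c03
 *     (val & BXE_PATH0_LOAD_CNT_MASK) >> BXE_PATH0_LOAD_CNT_SHIFT = 0x03
 *       -> absolute functions 0 and 1 hold load marks on engine 0
 *     (val & BXE_PATH1_LOAD_CNT_MASK) >> BXE_PATH1_LOAD_CNT_SHIFT = 0x0c
 *       -> absolute functions 2 and 3 hold load marks on engine 1
 *     (val & BXE_PATH0_RST_IN_PROG_BIT) != 0
 *       -> a reset is in progress on engine 0
 * Each PF contributes the bit (1 << SC_ABS_FUNC(sc)) to its engine's
 * 8-bit field; see bxe_set_pf_load() / bxe_clear_pf_load() below.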
*/ #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff #define BXE_PATH0_LOAD_CNT_SHIFT 0 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 #define BXE_PATH1_LOAD_CNT_SHIFT 8 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 #define BXE_GLOBAL_RESET_BIT 0x00040000 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ static void bxe_set_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ static void bxe_clear_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ static uint8_t bxe_reset_is_global(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; } /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ static void bxe_set_reset_done(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Clear the bit */ val &= ~bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ static void bxe_set_reset_in_progress(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Set the bit */ val |= bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; /* return false if bit is set */ return (val & bit) ? FALSE : TRUE; } /* get the load status for an engine, should be run under rtnl lock */ static uint8_t bxe_get_load_status(struct bxe_softc *sc, int engine) { uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); val = ((val & mask) >> shift); BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); return (val != 0); } /* set pf load mark */ /* XXX needs to be under rtnl lock */ static void bxe_set_pf_load(struct bxe_softc *sc) { uint32_t val; uint32_t val1; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT :
                                   BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);

    /* get the current counter value */
    val1 = ((val & mask) >> shift);

    /* set bit of this PF */
    val1 |= (1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* clear pf load mark */
/* XXX needs to be under rtnl lock */
static uint8_t
bxe_clear_pf_load(struct bxe_softc *sc)
{
    uint32_t val1, val;
    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                  BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
                                   BXE_PATH0_LOAD_CNT_SHIFT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);

    /* get the current counter value */
    val1 = (val & mask) >> shift;

    /* clear bit of that PF */
    val1 &= ~(1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    return (val1 != 0);
}

/* send a load request to the MCP and analyze the response */
static int
bxe_nic_load_request(struct bxe_softc *sc,
                     uint32_t         *load_code)
{
    /* init fw_seq */
    sc->fw_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
         DRV_MSG_SEQ_NUMBER_MASK);

    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);

    /* get the current FW pulse sequence */
    sc->fw_drv_pulse_wr_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
         DRV_PULSE_SEQ_MASK);

    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
          sc->fw_drv_pulse_wr_seq);

    /* load request */
    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

    /* if the MCP fails to respond we must abort */
    if (!(*load_code)) {
        BLOGE(sc, "MCP response failure!\n");
        return (-1);
    }

    /* if the MCP refused then we must abort */
    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
        BLOGE(sc, "MCP refused load request\n");
        return (-1);
    }

    return (0);
}

/*
 * Check whether another PF has already loaded FW to the chip. In
 * virtualized environments a PF from another VM may have already
 * initialized the device, including loading FW.
 */
static int
bxe_nic_load_analyze_req(struct bxe_softc *sc,
                         uint32_t         load_code)
{
    uint32_t my_fw, loaded_fw;

    /* is another pf loaded on this engine?
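     *
     * For reference, the comparison below packs the driver's FW version
     * into a single dword by adding non-overlapping shifted fields:
     *   my_fw = major + (minor << 8) + (revision << 16) + (eng << 24)
     * e.g. a hypothetical FW version 7.13.1.0 packs to
     *   my_fw = 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07
     * and any mismatch with the dword read back from XSEM_REG_PRAM aborts
     * the load.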
     */
    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
        /* build my FW version dword */
        my_fw = (BCM_5710_FW_MAJOR_VERSION +
                 (BCM_5710_FW_MINOR_VERSION << 8) +
                 (BCM_5710_FW_REVISION_VERSION << 16) +
                 (BCM_5710_FW_ENGINEERING_VERSION << 24));

        /* read loaded FW from chip */
        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
              loaded_fw, my_fw);

        /* abort nic load if version mismatch */
        if (my_fw != loaded_fw) {
            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
                  loaded_fw, my_fw);
            return (-1);
        }
    }

    return (0);
}

/* mark PMF if applicable */
static void
bxe_nic_load_pmf(struct bxe_softc *sc,
                 uint32_t         load_code)
{
    uint32_t ncsi_oem_data_addr;

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
        /*
         * Barrier here for ordering between the writing to sc->port.pmf
         * here and reading it from the periodic task.
         */
        sc->port.pmf = 1;
        mb();
    } else {
        sc->port.pmf = 0;
    }

    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);

    /* XXX needed? */
    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
            if (ncsi_oem_data_addr) {
                REG_WR(sc,
                       (ncsi_oem_data_addr +
                        offsetof(struct glob_ncsi_oem_data,
                                 driver_version)),
                       0);
            }
        }
    }
}

static void
bxe_read_mf_cfg(struct bxe_softc *sc)
{
    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
    int abs_func;
    int vn;

    if (BXE_NOMCP(sc)) {
        return; /* what should be the default value in this case */
    }

    /*
     * The formula for computing the absolute function number is...
     * For 2 port configuration (4 functions per port):
     *   abs_func = 2 * vn + SC_PORT + SC_PATH
     * For 4 port configuration (2 functions per port):
     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
     */
    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
        if (abs_func >= E1H_FUNC_MAX) {
            break;
        }
        sc->devinfo.mf_info.mf_config[vn] =
            MFCFG_RD(sc, func_mf_config[abs_func].config);
    }

    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
        FUNC_MF_CFG_FUNC_DISABLED) {
        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
        sc->flags |= BXE_MF_FUNC_DIS;
    } else {
        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
        sc->flags &= ~BXE_MF_FUNC_DIS;
    }
}

/* acquire split MCP access lock register */
static int
bxe_acquire_alr(struct bxe_softc *sc)
{
    uint32_t j, val;

    for (j = 0; j < 1000; j++) {
        val = (1UL << 31);
        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
        if (val & (1L << 31))
            break;

        DELAY(5000);
    }

    if (!(val & (1L << 31))) {
        BLOGE(sc, "Cannot acquire MCP access lock register\n");
        return (-1);
    }

    return (0);
}

/* release split MCP access lock register */
static void
bxe_release_alr(struct bxe_softc *sc)
{
    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
}

static void
bxe_fan_failure(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t ext_phy_config;

    /* mark the failure */
    ext_phy_config =
        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);

    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
             ext_phy_config);

    /* log the failure */
    BLOGW(sc, "Fan Failure has caused the driver to shut down "
              "the card to prevent permanent damage. 
" "Please contact OEM Support for assistance\n"); /* XXX */ #if 1 bxe_panic(sc, ("Schedule task to handle fan failure\n")); #else /* * Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); schedule_delayed_work(&sc->sp_rtnl_task, 0); #endif } /* this function is called upon a link interrupt */ static void bxe_link_attn(struct bxe_softc *sc) { uint32_t pause_enabled = 0; struct host_port_stats *pstats; int cmng_fns; /* Make sure that we are synced with the current statistics */ bxe_stats_handle(sc, STATS_EVENT_STOP); elink_link_update(&sc->link_params, &sc->link_vars); if (sc->link_vars.link_up) { /* dropless flow control */ if (!CHIP_IS_E1(sc) && sc->dropless_fc) { pause_enabled = 0; if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { pause_enabled = 1; } REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), pause_enabled); } if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { pstats = BXE_SP(sc, port_stats); /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } if (sc->state == BXE_STATE_OPEN) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } } if (sc->link_vars.link_up && sc->link_vars.line_speed) { cmng_fns = bxe_get_cmng_fns_mode(sc); if (cmng_fns != CMNG_FNS_NONE) { bxe_cmng_fns_init(sc, FALSE, cmng_fns); storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } else { /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); } } bxe_link_report_locked(sc); if (IS_MF(sc)) { ; // XXX bxe_link_sync_notify(sc); } } static void bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted) { int port = SC_PORT(sc); uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0; uint32_t aeu_mask; uint32_t nig_mask = 0; uint32_t reg_addr; uint32_t igu_acked; uint32_t cnt; if (sc->attn_state & asserted) { BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, aeu_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", aeu_mask, asserted); aeu_mask &= ~(asserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, aeu_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state |= asserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); if (asserted & ATTN_HARD_WIRED_MASK) { if (asserted & ATTN_NIG_FOR_FUNC) { bxe_acquire_phy_lock(sc); /* save nig interrupt mask */ nig_mask = REG_RD(sc, nig_int_mask_addr); /* If nig_mask is not set, no need to call the update function */ if (nig_mask) { REG_WR(sc, nig_int_mask_addr, 0); bxe_link_attn(sc); } /* handle unicore attn? 
*/ } if (asserted & ATTN_SW_TIMER_4_FUNC) { BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); } if (asserted & GPIO_2_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); } if (asserted & GPIO_3_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); } if (asserted & GPIO_4_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); } if (port == 0) { if (asserted & ATTN_GENERAL_ATTN_1) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); } if (asserted & ATTN_GENERAL_ATTN_2) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); } if (asserted & ATTN_GENERAL_ATTN_3) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); } } else { if (asserted & ATTN_GENERAL_ATTN_4) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); } if (asserted & ATTN_GENERAL_ATTN_5) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); } if (asserted & ATTN_GENERAL_ATTN_6) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); } } } /* hardwired */ if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); } BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", asserted, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, asserted); /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) { /* * Verify that IGU ack through BAR was written before restoring * NIG mask. This loop should exit after 2-3 iterations max. */ if (sc->devinfo.int_block != INT_BLOCK_HC) { cnt = 0; do { igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && (++cnt < MAX_IGU_ATTN_ACK_TO)); if (!igu_acked) { BLOGE(sc, "Failed to verify IGU ack on time\n"); } mb(); } REG_WR(sc, nig_int_mask_addr, nig_mask); bxe_release_phy_lock(sc); } } static void bxe_print_next_block(struct bxe_softc *sc, int idx, const char *blk) { BLOGI(sc, "%s%s", idx ? 
", " : "", blk); } static int bxe_check_blocks_with_parity0(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "BRB"); break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PARSER"); break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSDM"); break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "SEARCHER"); break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TCM"); break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSEMI"); break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XPB"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity1(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { int i = 0; uint32_t cur_bit = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PBF"); break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "QM"); break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TM"); break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSDM"); break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XCM"); break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSEMI"); break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DOORBELLQ"); break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "NIG"); break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DEBUG"); break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USDM"); break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UCM"); break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USEMI"); break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UPB"); break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSDM"); break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CCM"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity2(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSEMI"); break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 
if (print) bxe_print_next_block(sc, par_num++, "PXP"); break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CFC"); break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CDU"); break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DMAE"); break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "IGU"); break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "MISC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity3(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP ROM"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP RX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP TX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP SCPAD"); *global = TRUE; break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity4(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PGLUE_B"); break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "ATC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static uint8_t bxe_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print, uint32_t *sig) { int par_num = 0; if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { BLOGE(sc, "Parity error: HW block parity attention:\n" "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); if (print) BLOGI(sc, "Parity errors detected in blocks: "); par_num = bxe_check_blocks_with_parity0(sc, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); par_num = bxe_check_blocks_with_parity1(sc, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); par_num = bxe_check_blocks_with_parity2(sc, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bxe_check_blocks_with_parity3(sc, sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); par_num = bxe_check_blocks_with_parity4(sc, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) BLOGI(sc, "\n"); return (TRUE); } return (FALSE); } static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print) { struct attn_route attn = { {0} }; int port = SC_PORT(sc); attn.sig[0] 
= REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

    /*
     * Since MCP attentions can't be disabled inside the block, we need to
     * read AEU registers to see whether they're currently disabled
     */
    attn.sig[3] &= ((REG_RD(sc,
                            (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 :
                                     MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
                     MISC_AEU_ENABLE_MCP_PRTY_BITS) |
                    ~MISC_AEU_ENABLE_MCP_PRTY_BITS);

    if (!CHIP_IS_E1x(sc))
        attn.sig[4] =
            REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);

    return (bxe_parity_attn(sc, global, print, attn.sig));
}

static void
bxe_attn_int_deasserted4(struct bxe_softc *sc,
                         uint32_t         attn)
{
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
    }

    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
    }
}

static void
bxe_e1h_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    bxe_tx_disable(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

static void
bxe_e1h_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);

    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);

    // XXX bxe_tx_enable(sc);
}

/*
 * called due to MCP event (on pmf):
 *   reread new bandwidth configuration
 *   configure FW
 *   notify other functions
about the change */ static void bxe_config_mf_bw(struct bxe_softc *sc) { if (sc->link_vars.link_up) { bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); // XXX bxe_link_sync_notify(sc); } storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } static void bxe_set_mf_bw(struct bxe_softc *sc) { bxe_config_mf_bw(sc); bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } static void bxe_handle_eee_event(struct bxe_softc *sc) { BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 static void bxe_drv_info_ether_stat(struct bxe_softc *sc) { struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; strlcpy(ether_stat->version, BXE_DRIVER_VERSION, ETH_STAT_INFO_VERSION_LEN); /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, ether_stat->mac_local + MAC_PAD, MAC_PAD, ETH_ALEN); ether_stat->mtu_size = sc->mtu; ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; } // XXX ether_stat->feature_flags |= ???; ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; ether_stat->txq_size = sc->tx_ring_size; ether_stat->rxq_size = sc->rx_ring_size; } static void bxe_handle_drv_info_req(struct bxe_softc *sc) { enum drv_info_opcode op_code; uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT); memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); switch (op_code) { case ETH_STATS_OPCODE: bxe_drv_info_ether_stat(sc); break; case FCOE_STATS_OPCODE: case ISCSI_STATS_OPCODE: default: /* if op code isn't supported - send NACK */ bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } /* * If we got drv_info attn from MFW then these fields are defined in * shmem2 for sure */ SHMEM2_WR(sc, drv_info_host_addr_lo, U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); SHMEM2_WR(sc, drv_info_host_addr_hi, U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); } static void bxe_dcc_event(struct bxe_softc *sc, uint32_t dcc_event) { BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { /* * This is the only place besides the function initialization * where the sc->flags can change so it is done without any * locks */ if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; bxe_e1h_disable(sc); } else { BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; bxe_e1h_enable(sc); } dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; } if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { bxe_config_mf_bw(sc); dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; } /* Report results to MCP */ if (dcc_event) bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); else bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); } static void bxe_pmf_update(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; sc->port.pmf = 1; BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); /* * We need the mb() to 
ensure the ordering between the writing to * sc->port.pmf here and reading it from the bxe_periodic_task(). */ mb(); /* queue a periodic task */ // XXX schedule task... // XXX bxe_dcbx_pmf_update(sc); /* enable nig attention */ val = (0xff0f | (1 << (SC_VN(sc) + 4))); if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); } bxe_stats_handle(sc, STATS_EVENT_PMF); } static int bxe_mc_assert(struct bxe_softc *sc) { char last_idx; int i, rc = 0; uint32_t row0, row1, row2, row3; /* XSTORM */ last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* TSTORM */ last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* CSTORM */ last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* USTORM */ last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } return (rc); } static void bxe_attn_int_deasserted3(struct 
bxe_softc *sc, uint32_t attn) { int func = SC_FUNC(sc); uint32_t val; if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { if (attn & BXE_PMF_LINK_ASSERT(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bxe_read_mf_cfg(sc); sc->devinfo.mf_info.mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); if (val & DRV_STATUS_DCC_EVENT_MASK) bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); if (val & DRV_STATUS_SET_MF_BW) bxe_set_mf_bw(sc); if (val & DRV_STATUS_DRV_INFO_REQ) bxe_handle_drv_info_req(sc); if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) bxe_pmf_update(sc); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bxe_handle_eee_event(sc); if (sc->link_vars.periodic_flags & ELINK_PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ bxe_acquire_phy_lock(sc); sc->link_vars.periodic_flags &= ~ELINK_PERIODIC_FLAGS_LINK_EVENT; bxe_release_phy_lock(sc); if (IS_MF(sc)) ; // XXX bxe_link_sync_notify(sc); bxe_link_report(sc); } /* * Always call it here: bxe_link_report() will * prevent the link indication duplication. */ bxe_link_status_update(sc); } else if (attn & BXE_MC_ASSERT_BITS) { BLOGE(sc, "MC assert!\n"); bxe_mc_assert(sc); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); bxe_panic(sc, ("MC assert!\n")); } else if (attn & BXE_MCP_ASSERT) { BLOGE(sc, "MCP assert!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); // XXX bxe_fw_dump(sc); } else { BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); } } if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); if (attn & BXE_GRC_TIMEOUT) { val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); BLOGE(sc, "GRC time-out 0x%08x\n", val); } if (attn & BXE_GRC_RSV) { val = CHIP_IS_E1(sc) ? 
0 :
                                 REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            BLOGE(sc, "GRC reserved 0x%08x\n", val);
        }

        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}

static void
bxe_attn_int_deasserted2(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val0, mask0, val1, mask1;
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        BLOGE(sc, "CFC hw attention 0x%08x\n", val);

        /* CFC error attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from CFC\n");
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);

        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            BLOGE(sc, "FATAL error from PXP\n");
        }

        if (!CHIP_IS_E1x(sc)) {
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
        }
    }

#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);

            /*
             * If only PXP2_EOP_ERROR_BIT is set in STS0 and STS1, clear
             * it. We may lose additional attentions raised between the
             * reads of STS0 and STS_CLR0; in that case the user will not
             * be notified about them.
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT && !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);

            /*
             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * then notify
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");

                /*
                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear attention from PXP2 block without panic
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        bxe_panic(sc, ("HW block attention set2\n"));
    }
}

static void
bxe_attn_int_deasserted1(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
        BLOGE(sc, "DB hw attention 0x%08x\n", val);

        /* DORQ discard attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from DORQ\n");
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_1) {
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
        bxe_panic(sc, ("HW block attention set1\n"));
    }
}

static void
bxe_attn_int_deasserted0(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val;

    reg_offset = (port) ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;

    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
        val = REG_RD(sc, reg_offset);
        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_offset, val);

        BLOGW(sc, "SPIO5 hw attention\n");

        /* Fan failure attention */
        elink_hw_reset_phy(&sc->link_params);
        bxe_fan_failure(sc);
    }

    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        bxe_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bxe_release_phy_lock(sc);
    }

    if (attn & HW_INTERRUT_ASSERT_SET_0) {
        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(sc, reg_offset, val);

        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
                       (attn & HW_INTERRUT_ASSERT_SET_0)));
    }
}

static void
bxe_attn_int_deasserted(struct bxe_softc *sc,
                        uint32_t         deasserted)
{
    struct attn_route attn;
    struct attn_route *group_mask;
    int port = SC_PORT(sc);
    int index;
    uint32_t reg_addr;
    uint32_t val;
    uint32_t aeu_mask;
    uint8_t global = FALSE;

    /*
     * Need to take HW lock because MCP or other port might also
     * try to handle this event.
     */
    bxe_acquire_alr(sc);

    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
        /* XXX
         * In case of parity errors don't handle attentions so that
         * other functions would "see" the parity errors.
         */
        sc->recovery_state = BXE_RECOVERY_INIT;
        // XXX schedule a recovery task...
        /* disable HW interrupts */
        bxe_int_disable(sc);
        bxe_release_alr(sc);
        return;
    }

    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

    if (!CHIP_IS_E1x(sc)) {
        attn.sig[4] =
            REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
    } else {
        attn.sig[4] = 0;
    }

    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &sc->attn_group[index];

            BLOGD(sc, DBG_INTR,
                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
                  group_mask->sig[0], group_mask->sig[1],
                  group_mask->sig[2], group_mask->sig[3],
                  group_mask->sig[4]);

            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
        }
    }

    bxe_release_alr(sc);

    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
    }

    val = ~deasserted;
    BLOGD(sc, DBG_INTR,
          "about to mask 0x%08x at %s addr 0x%08x\n", val,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, val);

    if (~sc->attn_state & deasserted) {
        BLOGE(sc, "IGU error\n");
    }

    reg_addr = port ?
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, reg_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", aeu_mask, deasserted); aeu_mask |= (deasserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, reg_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state &= ~deasserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); } static void bxe_attn_int(struct bxe_softc *sc) { /* read local copy of bits */ uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); uint32_t attn_state = sc->attn_state; /* look for changed bits */ uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; uint32_t deasserted = ~attn_bits & attn_ack & attn_state; BLOGD(sc, DBG_INTR, "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", attn_bits, attn_ack, asserted, deasserted); if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { BLOGE(sc, "BAD attention state\n"); } /* handle bits that were raised */ if (asserted) { bxe_attn_int_asserted(sc, asserted); } if (deasserted) { bxe_attn_int_deasserted(sc, deasserted); } } static uint16_t bxe_update_dsb_idx(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; uint16_t rc = 0; mb(); /* status block is written to by the chip */ if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= BXE_DEF_SB_ATT_IDX; } if (sc->def_idx != def_sb->sp_sb.running_index) { sc->def_idx = def_sb->sp_sb.running_index; rc |= BXE_DEF_SB_IDX; } mb(); return (rc); } static inline struct ecore_queue_sp_obj * bxe_cid_to_q_obj(struct bxe_softc *sc, uint32_t cid) { BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); } static void bxe_handle_mcast_eqe(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam; int rc; memset(&rparam, 0, sizeof(rparam)); rparam.mcast_obj = &sc->mcast_obj; BXE_MCAST_LOCK(sc); /* clear pending state for the last command */ sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); /* if there are pending mcast commands - send them */ if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); if (rc < 0) { BLOGD(sc, DBG_SP, "ERROR: Failed to send pending mcast commands (%d)\n", rc); } } BXE_MCAST_UNLOCK(sc); } static void bxe_handle_classification_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { unsigned long ramrod_flags = 0; int rc = 0; uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; struct ecore_vlan_mac_obj *vlan_mac_obj; /* always push next commands out, don't wait here */ bit_set(&ramrod_flags, RAMROD_CONT); switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { case ECORE_FILTER_MAC_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); vlan_mac_obj = &sc->sp_objs[cid].mac_obj; break; case ECORE_FILTER_MCAST_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); /* * This is only relevant for 57710 where multicast MACs are * configured as unicast MACs using the same ramrod. 
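* * As an illustrative sketch (restating the switch statement above, not the original comment): the EQE echo field encodes both the connection ID and the pending filter type, split with the masks already used in this function: * * cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; * type = le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT; * * which is how an ECORE_FILTER_MCAST_PENDING completion gets routed to bxe_handle_mcast_eqe() from the classification path.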
*/ bxe_handle_mcast_eqe(sc); return; default: BLOGE(sc, "Unsupported classification command: %d\n", elem->message.data.eth_event.echo); return; } rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); } else if (rc > 0) { BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); } } static void bxe_handle_rx_mode_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); /* send rx_mode command again if was requested */ if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { bxe_set_storm_rx_mode(sc); } } static void bxe_update_eq_prod(struct bxe_softc *sc, uint16_t prod) { storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); wmb(); /* keep prod updates ordered */ } static void bxe_eq_int(struct bxe_softc *sc) { uint16_t hw_cons, sw_cons, sw_prod; union event_ring_elem *elem; uint8_t echo; uint32_t cid; uint8_t opcode; int spqe_cnt = 0; struct ecore_queue_sp_obj *q_obj; struct ecore_func_sp_obj *f_obj = &sc->func_obj; struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; hw_cons = le16toh(*sc->eq_cons_sb); /* * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. * when we get to the next-page we need to adjust so the loop * condition below will be met. The next element is the size of a * regular element and hence incrementing by 1 */ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { hw_cons++; } /* * This function may never run in parallel with itself for a * specific sc and no need for a read memory barrier here. */ sw_cons = sc->eq_cons; sw_prod = sc->eq_prod; BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { elem = &sc->eq[EQ_DESC(sw_cons)]; /* elem CID originates from FW, actually LE */ cid = SW_CID(elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: BLOGD(sc, DBG_SP, "got statistics completion event %d\n", sc->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; case EVENT_RING_OPCODE_CFC_DEL: /* handle according to cid range */ /* we may want to verify here that the sc state is HALTING */ BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); q_obj = bxe_cid_to_q_obj(sc, cid); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { break; } goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: echo = elem->message.data.function_update_event.echo; if (echo == SWITCH_UPDATE) { BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_SWITCH_UPDATE)) { break; } } else { BLOGD(sc, DBG_SP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); } goto next_spqe; case EVENT_RING_OPCODE_FORWARD_SETUP: q_obj = &bxe_fwd_sp_obj(sc, q_obj); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_SETUP_TX_ONLY)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: 
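/* * A sketch of the pairing, inferred from the visible flow rather than stated in the original comments: this completion corresponds to the ECORE_F_CMD_START transition requested via bxe_func_start(), and f_obj->complete_cmd() below marks that function state change as done. */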
BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { break; } goto next_spqe; } switch (opcode | sc->state) { case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); rss_raw->clear_pending(rss_raw); break; case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); bxe_handle_classification_eqe(sc, elem); break; case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got mcast ramrod\n"); bxe_handle_mcast_eqe(sc); break; case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); bxe_handle_rx_mode_eqe(sc, elem); break; default: /* unknown event log error and continue */ BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", elem->message.opcode, sc->state); } next_spqe: spqe_cnt++; } /* for */ mb(); atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); sc->eq_cons = sw_cons; sc->eq_prod = sw_prod; /* make sure that above mem writes were issued towards the memory */ wmb(); /* update producer */ bxe_update_eq_prod(sc, sc->eq_prod); } static void bxe_handle_sp_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; uint16_t status; BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); /* what work needs to be performed? */ status = bxe_update_dsb_idx(sc); BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); /* HW attentions */ if (status & BXE_DEF_SB_ATT_IDX) { BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); bxe_attn_int(sc); status &= ~BXE_DEF_SB_ATT_IDX; } /* SP events: STAT_QUERY and others */ if (status & BXE_DEF_SB_IDX) { /* handle EQ completions */ BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); bxe_eq_int(sc); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, le16toh(sc->def_idx), IGU_INT_NOP, 1); status &= ~BXE_DEF_SB_IDX; } /* if status is non zero then something went wrong */ if (__predict_false(status)) { BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); } /* ack status block only if something was actually handled */ bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); /* * Must be called after the EQ processing (since eq leads to sriov * ramrod completion flows). * This flow may have been scheduled by the arrival of a ramrod * completion, or by the sriov code rescheduling itself. 
*/ // XXX bxe_iov_sp_task(sc); } static void bxe_handle_fp_tq(void *context, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)context; struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); /* XXX * IFF_DRV_RUNNING state can't be checked here since we process * slowpath events on a client queue during setup. Instead * we need to add a "process/continue" flag here that the driver * can use to tell the task here not to do anything. */ #if 0 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { return; } #endif /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if we ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); more_tx = bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do */ taskqueue_enqueue(fp->tq, &fp->tq_task); return; } bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } static void bxe_task_fp(struct bxe_fastpath *fp) { struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if we ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); more_tx = bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do, bail out of this ISR and process later */ taskqueue_enqueue(fp->tq, &fp->tq_task); return; } /* * Here we write the fastpath index taken before doing any tx or rx work. * It is quite possible that other HW events occurred up to this point and * were actually processed accordingly above. Since we're going to * write an older fastpath index, an interrupt is coming in which we might * not do any work. */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } /* * Legacy interrupt entry point. * * Verifies that the controller generated the interrupt and * then calls a separate routine to handle the various * interrupt causes: link, RX, and TX.
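* * A sketch of the demux performed below (drawn from the code rather than the original comment): bit 0 of the status returned by bxe_ack_int() selects the slowpath task, while fastpath queue i is matched against * * mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); * * and any status bits still set afterwards are logged as unexpected.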
*/ static void bxe_intr_legacy(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; struct bxe_fastpath *fp; uint16_t status, mask; int i; BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); /* * 0 for ustorm, 1 for cstorm * the bits returned from ack_int() are 0-15 * bit 0 = attention status block * bit 1 = fast path status block * a mask of 0x2 or more = tx/rx event * a mask of 1 = slow path event */ status = bxe_ack_int(sc); /* the interrupt is not for us */ if (__predict_false(status == 0)) { BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); return; } BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); if (status & mask) { /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); status &= ~mask; } } if (__predict_false(status & 0x1)) { /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); status &= ~0x1; } if (__predict_false(status)) { BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); } } /* slowpath interrupt entry point */ static void bxe_intr_sp(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); } /* fastpath interrupt entry point */ static void bxe_intr_fp(void *xfp) { struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; struct bxe_softc *sc = fp->sc; BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); BLOGD(sc, DBG_INTR, "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); } /* Release all interrupts allocated by the driver. */ static void bxe_interrupt_free(struct bxe_softc *sc) { int i; switch (sc->interrupt_mode) { case INTR_MODE_INTX: BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); if (sc->intr[0].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[0].rid, sc->intr[0].resource); } break; case INTR_MODE_MSI: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; case INTR_MODE_MSIX: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; default: /* nothing to do as initial allocation failed */ break; } } /* * This function determines and allocates the appropriate * interrupt based on system capabilities and user request. * * The user may force a particular interrupt mode, specify * the number of receive queues, specify the method for * distributing received frames to receive queues, or use * the default settings which will automatically select the * best supported combination. In addition, the OS may or * may not support certain combinations of these settings.
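* * The fallback ladder implemented below is, in rough outline: MSI-X asking for (num_queues + 1) vectors capped by pci_msix_count(), then a single MSI vector, then shared INTx; each failed stage releases whatever it allocated and drops down to the next mode. *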
* This routine attempts to reconcile the settings requested * by the user with the capabilities available from the system * to select the optimal combination of features. * * Returns: * 0 = Success, !0 = Failure. */ static int bxe_interrupt_alloc(struct bxe_softc *sc) { int msix_count = 0; int msi_count = 0; int num_requested = 0; int num_allocated = 0; int rid, i, j; int rc; /* get the number of available MSI/MSI-X interrupts from the OS */ if (sc->interrupt_mode > 0) { if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { msix_count = pci_msix_count(sc->dev); } if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { msi_count = pci_msi_count(sc->dev); } BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", msi_count, msix_count); } do { /* try allocating MSI-X interrupt resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSIX) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || (msix_count < 2)) { sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } /* ask for the necessary number of MSI-X vectors */ num_requested = min((sc->num_queues + 1), msix_count); BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } if (num_allocated < 2) { /* possible? */ BLOGE(sc, "MSI-X allocation less than 2!\n"); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated - 1; rid = 1; /* initial resource identifier */ /* allocate the MSI-X vectors */ for (i = 0; i < num_allocated; i++) { sc->intr[i].rid = (rid + i); if ((sc->intr[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[i].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", i, (rid + i)); for (j = (i - 1); j >= 0; j--) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[j].rid, sc->intr[j].resource); } sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); } } while (0); do { /* try allocating MSI vector resources (at least 1) */ if (sc->interrupt_mode != INTR_MODE_MSI) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || (msi_count < 1)) { sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } /* ask for a single MSI vector */ num_requested = 1; BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI alloc failed (%d)!\n", rc); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } if (num_allocated != 1) { /* possible?
*/ BLOGE(sc, "MSI allocation is not 1!\n"); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated; rid = 1; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); } while (0); do { /* try allocating INTx vector resources */ if (sc->interrupt_mode != INTR_MODE_INTX) { break; } BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); /* only one vector for INTx */ sc->intr_count = 1; sc->num_queues = 1; rid = 0; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, (RF_ACTIVE | RF_SHAREABLE))) == NULL) { BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = -1; /* Failed! */ break; } BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); } while (0); if (sc->interrupt_mode == -1) { BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); rc = 1; } else { BLOGD(sc, DBG_LOAD, "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", sc->interrupt_mode, sc->num_queues); rc = 0; } return (rc); } static void bxe_interrupt_detach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; /* release interrupt resources */ for (i = 0; i < sc->intr_count; i++) { if (sc->intr[i].resource && sc->intr[i].tag) { BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); } } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tq) { taskqueue_drain(fp->tq, &fp->tq_task); taskqueue_free(fp->tq); fp->tq = NULL; } } if (sc->sp_tq) { taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); taskqueue_free(sc->sp_tq); sc->sp_tq = NULL; } } /* * Enables interrupts and attach to the ISR. * * When using multiple MSI/MSI-X vectors the first vector * is used for slowpath operations while all remaining * vectors are used for fastpath operations. If only a * single MSI/MSI-X vector is used (SINGLE_ISR) then the * ISR must look for both slowpath and fastpath completions. */ static int bxe_interrupt_attach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int rc = 0; int i; snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), "bxe%d_sp_tq", sc->unit); TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->sp_tq); taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ "%s", sc->sp_tq_name); for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tq_name, sizeof(fp->tq_name), "bxe%d_fp%d_tq", sc->unit, i); TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->tq); taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ "%s", fp->tq_name); } /* setup interrupt handlers */ if (sc->interrupt_mode == INTR_MODE_MSIX) { BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); /* * Setup the interrupt handler. 
Note that we pass the driver instance * to the interrupt handler for the slowpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_sp, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[0].resource, sc->intr[0].tag, "sp"); /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ /* initialize the fastpath vectors (note the first was used for sp) */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); /* * Setup the interrupt handler. Note that we pass the * fastpath context to the interrupt handler in this * case. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_fp, fp, &sc->intr[i + 1].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", (i + 1), rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[i + 1].resource, sc->intr[i + 1].tag, "fp%02d", i); /* bind the fastpath instance to a cpu */ if (sc->num_queues > 1) { bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); } fp->state = BXE_FP_STATE_IRQ; } } else if (sc->interrupt_mode == INTR_MODE_MSI) { BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. 
*/ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); goto bxe_interrupt_attach_exit; } } bxe_interrupt_attach_exit: return (rc); } static int bxe_init_hw_common_chip(struct bxe_softc *sc); static int bxe_init_hw_common(struct bxe_softc *sc); static int bxe_init_hw_port(struct bxe_softc *sc); static int bxe_init_hw_func(struct bxe_softc *sc); static void bxe_reset_common(struct bxe_softc *sc); static void bxe_reset_port(struct bxe_softc *sc); static void bxe_reset_func(struct bxe_softc *sc); static int bxe_gunzip_init(struct bxe_softc *sc); static void bxe_gunzip_end(struct bxe_softc *sc); static int bxe_init_firmware(struct bxe_softc *sc); static void bxe_release_firmware(struct bxe_softc *sc); static struct ecore_func_sp_drv_ops bxe_func_sp_drv = { .init_hw_cmn_chip = bxe_init_hw_common_chip, .init_hw_cmn = bxe_init_hw_common, .init_hw_port = bxe_init_hw_port, .init_hw_func = bxe_init_hw_func, .reset_hw_cmn = bxe_reset_common, .reset_hw_port = bxe_reset_port, .reset_hw_func = bxe_reset_func, .gunzip_init = bxe_gunzip_init, .gunzip_end = bxe_gunzip_end, .init_fw = bxe_init_firmware, .release_fw = bxe_release_firmware, }; static void bxe_init_func_obj(struct bxe_softc *sc) { sc->dmae_ready = 0; ecore_init_func_obj(sc, &sc->func_obj, BXE_SP(sc, func_rdata), BXE_SP_MAPPING(sc, func_rdata), BXE_SP(sc, func_afex_rdata), BXE_SP_MAPPING(sc, func_afex_rdata), &bxe_func_sp_drv); } static int bxe_init_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare the parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_INIT; func_params.params.hw_init.load_phase = load_code; /* * Via a plethora of function pointers, we will eventually reach * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
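* * The dispatch is keyed off load_code: the bxe_func_sp_drv ops table above wires init_hw_cmn/init_hw_port/init_hw_func to those routines, and ecore invokes the op matching the load phase passed in hw_init.load_phase (an outline inferred from the ops table, not spelled out in the original comment).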
*/ rc = ecore_func_state_change(sc, &func_params); return (rc); } static void bxe_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len) { uint32_t i; if (!(len % 4) && !(addr % 4)) { for (i = 0; i < len; i += 4) { REG_WR(sc, (addr + i), fill); } } else { for (i = 0; i < len; i++) { REG_WR8(sc, (addr + i), fill); } } } /* writes FP SP data to FW - data_size in dwords */ static void bxe_wr_fp_sb_data(struct bxe_softc *sc, int fw_sb_id, uint32_t *sb_data_p, uint32_t data_size) { int index; for (index = 0; index < data_size; index++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + (sizeof(uint32_t) * index)), *(sb_data_p + index)); } } static void bxe_zero_fp_sb(struct bxe_softc *sc, int fw_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; uint32_t *sb_data_p; uint32_t data_size = 0; if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_DISABLED; sb_data_e2.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_DISABLED; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); } bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_SYNC_BLOCK_SIZE); } static void bxe_wr_sp_sb_data(struct bxe_softc *sc, struct hc_sp_status_block_data *sp_sb_data) { int i; for (i = 0; i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); i++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + (i * sizeof(uint32_t))), *((uint32_t *)sp_sb_data + i)); } } static void bxe_zero_sp_sb(struct bxe_softc *sc) { struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); sp_sb_data.state = SB_DISABLED; sp_sb_data.p_func.vf_valid = FALSE; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_SYNC_BLOCK_SIZE); } static void bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { hc_sm->igu_sb_id = igu_sb_id; hc_sm->igu_seg_id = igu_seg_id; hc_sm->timer_value = 0xFF; hc_sm->time_to_expire = 0xFFFFFFFF; } static void bxe_map_sb_state_machines(struct hc_index_data *index_data) { /* zero out state machine indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; /* map indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= (SM_TX_ID << 
HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); } static void bxe_init_sb(struct bxe_softc *sc, bus_addr_t busaddr, int vfid, uint8_t vf_valid, int fw_sb_id, int igu_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; struct hc_status_block_sm *hc_sm_p; uint32_t *sb_data_p; int igu_seg_id; int data_size; if (CHIP_INT_MODE_IS_BC(sc)) { igu_seg_id = HC_SEG_ACCESS_NORM; } else { igu_seg_id = IGU_SEG_ACCESS_NORM; } bxe_zero_fp_sb(sc, fw_sb_id); if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_ENABLED; sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); sb_data_e2.common.p_func.vf_id = vfid; sb_data_e2.common.p_func.vf_valid = vf_valid; sb_data_e2.common.p_func.vnic_id = SC_VN(sc); sb_data_e2.common.same_igu_sb_1b = TRUE; sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e2.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e2.index_data); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_ENABLED; sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); sb_data_e1x.common.p_func.vf_id = 0xff; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); sb_data_e1x.common.same_igu_sb_1b = TRUE; sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e1x.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e1x.index_data); } bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); /* write indices to HW - PCI guarantees endianity of regpairs */ bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); } static inline uint8_t bxe_fp_qzone_id(struct bxe_fastpath *fp) { if (CHIP_IS_E1x(fp->sc)) { return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); } else { return (fp->cl_id); } } static inline uint32_t bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, struct bxe_fastpath *fp) { uint32_t offset = BAR_USTRORM_INTMEM; if (!CHIP_IS_E1x(sc)) { offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); } else { offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); } return (offset); } static void bxe_init_eth_fp(struct bxe_softc *sc, int idx) { struct bxe_fastpath *fp = &sc->fp[idx]; uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; unsigned long q_type = 0; int cos; fp->sc = sc; fp->index = idx; fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); fp->cl_id = (CHIP_IS_E1x(sc)) ? 
(SC_L_ID(sc) + idx) : /* want client ID same as IGU SB ID for non-E1 */ fp->igu_sb_id; fp->cl_qzone_id = bxe_fp_qzone_id(fp); /* setup sb indices */ if (!CHIP_IS_E1x(sc)) { fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; } else { fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; } /* init shortcut */ fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; /* * XXX If multiple CoS is ever supported then each fastpath structure * will need to maintain tx producer/consumer/dma/etc values *per* CoS. */ for (cos = 0; cos < sc->max_cos; cos++) { cids[cos] = idx; } fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; /* nothing more for a VF to do */ if (IS_VF(sc)) { return; } bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, fp->fw_sb_id, fp->igu_sb_id); bxe_update_fp_sb_idx(fp); /* Configure Queue State object */ bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); ecore_init_queue_obj(sc, &sc->sp_objs[idx].q_obj, fp->cl_id, cids, sc->max_cos, SC_FUNC(sc), BXE_SP(sc, q_rdata), BXE_SP_MAPPING(sc, q_rdata), q_type); /* configure classification DBs */ ecore_init_mac_obj(sc, &sc->sp_objs[idx].mac_obj, fp->cl_id, idx, SC_FUNC(sc), BXE_SP(sc, mac_rdata), BXE_SP_MAPPING(sc, mac_rdata), ECORE_FILTER_MAC_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); } static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod) { struct ustorm_eth_rx_producers rx_prods = { 0 }; uint32_t i; /* update producers */ rx_prods.bd_prod = rx_bd_prod; rx_prods.cqe_prod = rx_cq_prod; rx_prods.sge_prod = rx_sge_prod; /* * Make sure that the BD and SGE data is updated before updating the * producers since FW might read the BD/SGE right after the producer * is updated. * This is only applicable for weak-ordered memory model archs such * as IA-64. The following barrier is also mandatory since FW will * assumes BDs must have buffers. */ wmb(); for (i = 0; i < (sizeof(rx_prods) / 4); i++) { REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)), ((uint32_t *)&rx_prods)[i]); } wmb(); /* keep prod updates ordered */ BLOGD(sc, DBG_RX, "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n", fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); } static void bxe_init_rx_rings(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->rx_bd_cons = 0; /* * Activate the BD ring... 
* Warning, this will generate an interrupt (to the TSTORM) * so this can only be done after the chip is initialized */ bxe_update_rx_prod(sc, fp, fp->rx_bd_prod, fp->rx_cq_prod, fp->rx_sge_prod); if (i != 0) { continue; } if (CHIP_IS_E1(sc)) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), U64_LO(fp->rcq_dma.paddr)); REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), U64_HI(fp->rcq_dma.paddr)); } } } static void bxe_init_tx_ring_one(struct bxe_fastpath *fp) { SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1); fp->tx_db.data.zero_fill1 = 0; fp->tx_db.data.prod = 0; fp->tx_pkt_prod = 0; fp->tx_pkt_cons = 0; fp->tx_bd_prod = 0; fp->tx_bd_cons = 0; fp->eth_q_stats.tx_pkts = 0; } static inline void bxe_init_tx_rings(struct bxe_softc *sc) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_tx_ring_one(&sc->fp[i]); } } static void bxe_init_def_sb(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; bus_addr_t mapping = sc->def_sb_dma.paddr; int igu_sp_sb_index; int igu_seg_id; int port = SC_PORT(sc); int func = SC_FUNC(sc); int reg_offset, reg_offset_en5; uint64_t section; int index, sindex; struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); if (CHIP_INT_MODE_IS_BC(sc)) { igu_sp_sb_index = DEF_SB_IGU_ID; igu_seg_id = HC_SEG_ACCESS_DEF; } else { igu_sp_sb_index = sc->igu_dsb_id; igu_seg_id = IGU_SEG_ACCESS_DEF; } /* attentions */ section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, atten_status_block)); def_sb->atten_status_block.status_block_id = igu_sp_sb_index; sc->attn_state = 0; reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { /* take care of sig[0]..sig[4] */ for (sindex = 0; sindex < 4; sindex++) { sc->attn_group[index].sig[sindex] = REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); } if (!CHIP_IS_E1x(sc)) { /* * enable5 is separate from the rest of the registers, * and the address skip is 4 and not 16 between the * different groups */ sc->attn_group[index].sig[4] = REG_RD(sc, (reg_offset_en5 + (0x4 * index))); } else { sc->attn_group[index].sig[4] = 0; } } if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_offset = (port) ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; REG_WR(sc, reg_offset, U64_LO(section)); REG_WR(sc, (reg_offset + 4), U64_HI(section)); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); } section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, sp_sb)); bxe_zero_sp_sb(sc); /* PCI guarantees endianity of regpair */ sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); sp_sb_data.igu_sb_id = igu_sp_sb_index; sp_sb_data.igu_seg_id = igu_seg_id; sp_sb_data.p_func.pf_id = func; sp_sb_data.p_func.vnic_id = SC_VN(sc); sp_sb_data.p_func.vf_id = 0xff; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); } static void bxe_init_sp_ring(struct bxe_softc *sc) { atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); sc->spq_prod_idx = 0; sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; sc->spq_prod_bd = sc->spq; sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); } static void bxe_init_eq_ring(struct bxe_softc *sc) { union event_ring_elem *elem; int i; for (i = 1; i <= NUM_EQ_PAGES; i++) { elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); } sc->eq_cons = 0; sc->eq_prod = NUM_EQ_DESC; sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; atomic_store_rel_long(&sc->eq_spq_left, (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), NUM_EQ_DESC) - 1)); } static void bxe_init_internal_common(struct bxe_softc *sc) { int i; /* * Zero this manually as its initialization is currently missing * in the initTool. */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 0); } if (!CHIP_IS_E1x(sc)) { REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE); } } static void bxe_init_internal(struct bxe_softc *sc, uint32_t load_code) { switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON: case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: bxe_init_internal_common(sc); /* no break */ case FW_MSG_CODE_DRV_LOAD_PORT: /* nothing to do */ /* no break */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: /* internal memory per function is initialized inside bxe_pf_init */ break; default: BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); break; } } static void storm_memset_func_cfg(struct bxe_softc *sc, struct tstorm_eth_function_common_config *tcfg, uint16_t abs_fid) { uint32_t addr; size_t size; addr = (BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); size = sizeof(struct tstorm_eth_function_common_config); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); } static void bxe_func_init(struct bxe_softc *sc, struct bxe_func_init_params *p) { struct tstorm_eth_function_common_config tcfg = { 0 }; if (CHIP_IS_E1x(sc)) { storm_memset_func_cfg(sc, &tcfg, p->func_id); } /* Enable the function in the FW */ storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); storm_memset_func_en(sc, p->func_id, 1); /* spq */ if (p->func_flgs & FUNC_FLG_SPQ) { storm_memset_spq_addr(sc, p->spq_map, p->func_id); REG_WR(sc, (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); } } /* * Calculates the sum of vn_min_rates. 
* It's needed for further normalizing of the min_rates. * Returns: * sum of vn_min_rates. * or * 0 - if all the min_rates are 0. * In the latter case the fairness algorithm should be deactivated. * If all min rates are not zero then those that are zeroes will be set to 1. */ static void bxe_calc_vn_min(struct bxe_softc *sc, struct cmng_init_input *input) { uint32_t vn_cfg; uint32_t vn_min_rate; int all_zero = 1; int vn; for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { vn_cfg = sc->devinfo.mf_info.mf_config[vn]; vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100); if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { /* skip hidden VNs */ vn_min_rate = 0; } else if (!vn_min_rate) { /* If min rate is zero - set it to 100 */ vn_min_rate = DEF_MIN_RATE; } else { all_zero = 0; } input->vnic_min_rate[vn] = vn_min_rate; } /* if ETS or all min rates are zeros - disable fairness */ if (BXE_IS_ETS_ENABLED(sc)) { input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); } else if (all_zero) { input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; BLOGD(sc, DBG_LOAD, "Fairness disabled (all MIN values are zeroes)\n"); } else { input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } } static inline uint16_t bxe_extract_max_cfg(struct bxe_softc *sc, uint32_t mf_cfg) { uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); if (!max_cfg) { BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); max_cfg = 100; } return (max_cfg); } static void bxe_calc_vn_max(struct bxe_softc *sc, int vn, struct cmng_init_input *input) { uint16_t vn_max_rate; uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; uint32_t max_cfg; if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { vn_max_rate = 0; } else { max_cfg = bxe_extract_max_cfg(sc, vn_cfg); if (IS_MF_SI(sc)) { /* max_cfg in percents of linkspeed */ vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); } else { /* SD modes */ /* max_cfg is absolute in 100Mb units */ vn_max_rate = (max_cfg * 100); } } BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); input->vnic_max_rate[vn] = vn_max_rate; } static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type) { struct cmng_init_input input; int vn; memset(&input, 0, sizeof(struct cmng_init_input)); input.port_rate = sc->link_vars.line_speed; if (cmng_type == CMNG_FNS_MINMAX) { /* read mf conf from shmem */ if (read_cfg) { bxe_read_mf_cfg(sc); } /* get VN min rate and enable fairness if not 0 */ bxe_calc_vn_min(sc, &input); /* get VN max rate */ if (sc->port.pmf) { for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { bxe_calc_vn_max(sc, vn, &input); } } /* always enable rate shaping and fairness */ input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; ecore_init_cmng(&input, &sc->cmng); return; } /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); } static int bxe_get_cmng_fns_mode(struct bxe_softc *sc) { if (CHIP_REV_IS_SLOW(sc)) { return (CMNG_FNS_NONE); } if (IS_MF(sc)) { return (CMNG_FNS_MINMAX); } return (CMNG_FNS_NONE); } static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port) { int vn; int func; uint32_t addr; size_t size; addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); size = sizeof(struct cmng_struct_per_port); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { func =
func_by_vn(sc, vn); addr = (BAR_XSTRORM_INTMEM + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); size = sizeof(struct rate_shaping_vars_per_vn); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); addr = (BAR_XSTRORM_INTMEM + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); size = sizeof(struct fairness_vars_per_vn); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); } } static void bxe_pf_init(struct bxe_softc *sc) { struct bxe_func_init_params func_init = { 0 }; struct event_ring_data eq_data = { { 0 } }; uint16_t flags; if (!CHIP_IS_E1x(sc)) { /* reset IGU PF statistics: MSIX + ATTN */ /* PF */ REG_WR(sc, (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (BXE_IGU_STAS_MSG_VF_CNT * 4) + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 0); /* ATTN */ REG_WR(sc, (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (BXE_IGU_STAS_MSG_VF_CNT * 4) + (BXE_IGU_STAS_MSG_PF_CNT * 4) + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 0); } /* function setup flags */ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); /* * This flag is relevant for E1x only. * E2 doesn't have a TPA configuration at a function level. */ flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; func_init.func_flgs = flags; func_init.pf_id = SC_FUNC(sc); func_init.func_id = SC_FUNC(sc); func_init.spq_map = sc->spq_dma.paddr; func_init.spq_prod = sc->spq_prod_idx; bxe_func_init(sc, &func_init); memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); /* * Congestion management values depend on the link rate. * There is no active link so initial link rate is set to 10Gbps. * When the link comes up the congestion management values are * re-calculated according to the actual link rate. */ sc->link_vars.line_speed = SPEED_10000; bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); /* Only the PMF sets the HW */ if (sc->port.pmf) { storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } /* init Event Queue - PCI bus guarantees correct endianity */ eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); eq_data.producer = sc->eq_prod; eq_data.index_id = HC_SP_INDEX_EQ_CONS; eq_data.sb_id = DEF_SB_ID; storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); } static void bxe_hc_int_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; uint32_t val = REG_RD(sc, addr); uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && (sc->intr_count == 1)) ? TRUE : FALSE; uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; if (msix) { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0); val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (single_msix) { val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; } } else if (msi) { val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } else { val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (!CHIP_IS_E1(sc)) { BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); REG_WR(sc, addr, val); val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; } } if (CHIP_IS_E1(sc)) { REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); } BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, ((msix) ?
"MSI-X" : ((msi) ? "MSI" : "INTx"))); REG_WR(sc, addr, val); /* ensure that HC_CONFIG is written before leading/trailing edge config */ mb(); if (!CHIP_IS_E1(sc)) { /* init leading/trailing edge */ if (IS_MF(sc)) { val = (0xee0f | (1 << (SC_VN(sc) + 4))); if (sc->port.pmf) { /* enable nig and gpio3 attention */ val |= 0x1100; } } else { val = 0xffff; } REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); } /* make sure that interrupts are indeed enabled from here on */ mb(); } static void bxe_igu_int_enable(struct bxe_softc *sc) { uint32_t val; uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && (sc->intr_count == 1)) ? TRUE : FALSE; uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); if (msix) { val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); if (single_msix) { val |= IGU_PF_CONF_SINGLE_ISR_EN; } } else if (msi) { val &= ~IGU_PF_CONF_INT_LINE_EN; val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } else { val &= ~IGU_PF_CONF_MSI_MSIX_EN; val |= (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } /* clean previous status - need to configure igu prior to ack*/ if ((!msix) || single_msix) { REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); bxe_ack_int(sc); } val |= IGU_PF_CONF_FUNC_EN; BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); mb(); /* init leading/trailing edge */ if (IS_MF(sc)) { val = (0xee0f | (1 << (SC_VN(sc) + 4))); if (sc->port.pmf) { /* enable nig and gpio3 attention */ val |= 0x1100; } } else { val = 0xffff; } REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); /* make sure that interrupts are indeed enabled from here on */ mb(); } static void bxe_int_enable(struct bxe_softc *sc) { if (sc->devinfo.int_block == INT_BLOCK_HC) { bxe_hc_int_enable(sc); } else { bxe_igu_int_enable(sc); } } static void bxe_hc_int_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; uint32_t val = REG_RD(sc, addr); /* * In E1 we must use only PCI configuration space to disable MSI/MSIX * capablility. 
It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC * block */ if (CHIP_IS_E1(sc)) { /* * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register * to prevent from HC sending interrupts after we exit the function */ REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } else { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); /* flush all outstanding writes */ mb(); REG_WR(sc, addr, val); if (REG_RD(sc, addr) != val) { BLOGE(sc, "proper val not read from HC IGU!\n"); } } static void bxe_igu_int_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~(IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); /* flush all outstanding writes */ mb(); REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { BLOGE(sc, "proper val not read from IGU!\n"); } } static void bxe_int_disable(struct bxe_softc *sc) { if (sc->devinfo.int_block == INT_BLOCK_HC) { bxe_hc_int_disable(sc); } else { bxe_igu_int_disable(sc); } } static void bxe_nic_init(struct bxe_softc *sc, int load_code) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_eth_fp(sc, i); } rmb(); /* ensure status block indices were read */ bxe_init_rx_rings(sc); bxe_init_tx_rings(sc); if (IS_VF(sc)) { return; } /* initialize MOD_ABS interrupts */ elink_init_mod_abs_int(sc, &sc->link_vars, sc->devinfo.chip_id, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, SC_PORT(sc)); bxe_init_def_sb(sc); bxe_update_dsb_idx(sc); bxe_init_sp_ring(sc); bxe_init_eq_ring(sc); bxe_init_internal(sc, load_code); bxe_pf_init(sc); bxe_stats_init(sc); /* flush all before enabling interrupts */ mb(); bxe_int_enable(sc); /* check for SPIO5 */ bxe_attn_int_deasserted0(sc, REG_RD(sc, (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + SC_PORT(sc)*4)) & AEU_INPUTS_ATTN_BITS_SPIO5); } static inline void bxe_init_objs(struct bxe_softc *sc) { /* mcast rules must be added to tx if tx switching is enabled */ ecore_obj_type o_type = (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : ECORE_OBJ_TYPE_RX; /* RX_MODE controlling object */ ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); /* multicast configuration controlling object */ ecore_init_mcast_obj(sc, &sc->mcast_obj, sc->fp[0].cl_id, sc->fp[0].index, SC_FUNC(sc), SC_FUNC(sc), BXE_SP(sc, mcast_rdata), BXE_SP_MAPPING(sc, mcast_rdata), ECORE_FILTER_MCAST_PENDING, &sc->sp_state, o_type); /* Setup CAM credit pools */ ecore_init_mac_credit_pool(sc, &sc->macs_pool, SC_FUNC(sc), CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : VNICS_PER_PATH(sc)); ecore_init_vlan_credit_pool(sc, &sc->vlans_pool, SC_ABS_FUNC(sc) >> 1, CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : VNICS_PER_PATH(sc)); /* RSS configuration object */ ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp[0].cl_id, sc->fp[0].index, SC_FUNC(sc), SC_FUNC(sc), BXE_SP(sc, rss_rdata), BXE_SP_MAPPING(sc, rss_rdata), ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX); } /* * Initialize the function. This must be called before sending CLIENT_SETUP * for the first client. 
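* * The START ramrod parameters filled in below carry the multi-function mode (mf_mode), the outer VLAN tag for SD modes (sd_vlan_tag), and the CoS arbitration mode: STATIC_COS on E2/E3 versus firmware WRR on E1x (a summary of the assignments in the function body).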
*/ static inline int bxe_func_start(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; struct ecore_func_start_params *start_params = &func_params.params.start; /* Prepare parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_START; /* Function parameters */ start_params->mf_mode = sc->devinfo.mf_info.mf_mode; start_params->sd_vlan_tag = OVLAN(sc); if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { start_params->network_cos_mode = STATIC_COS; } else { /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; } //start_params->gre_tunnel_mode = 0; //start_params->gre_tunnel_rss = 0; return (ecore_func_state_change(sc, &func_params)); } static int bxe_set_power_state(struct bxe_softc *sc, uint8_t state) { uint16_t pmcsr; /* If there is no power capability, silently succeed */ if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { BLOGW(sc, "No power capability\n"); return (0); } pmcsr = pci_read_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 2); switch (state) { case PCI_PM_D0: pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); if (pmcsr & PCIM_PSTAT_DMASK) { /* delay required during transition out of D3hot */ DELAY(20000); } break; case PCI_PM_D3hot: /* XXX if there are other clients above don't shut down the power */ /* don't shut down the power for emulation and FPGA */ if (CHIP_REV_IS_SLOW(sc)) { return (0); } pmcsr &= ~PCIM_PSTAT_DMASK; pmcsr |= PCIM_PSTAT_D3; if (sc->wol) { pmcsr |= PCIM_PSTAT_PMEENABLE; } pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), pmcsr, 4); /* * No more memory access after this point until device is brought back * to D0 state. */ break; default: BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n", state, pmcsr); return (-1); } return (0); } /* return TRUE if the lock was acquired successfully */ static uint8_t bxe_trylock_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGD(sc, DBG_LOAD, "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return (FALSE); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); } /* try to acquire the lock */ REG_WR(sc, hw_lock_control_reg + 4, resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (TRUE); } BLOGE(sc, "Failed to get a resource lock 0x%x func %d " "lock_status 0x%x resource_bit 0x%x\n", resource, func, lock_status, resource_bit); return (FALSE); } /* * Get the recovery leader resource id according to the engine this function * belongs to. Currently only 2 engines are supported.
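* * The leader lock itself is taken via bxe_trylock_hw_lock() above: the resource bit is written to (hw_lock_control_reg + 4) and the control register is read back, the bit reading as set meaning we own the lock (a restatement of that function's flow, for context).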
*/ static int bxe_get_leader_lock_resource(struct bxe_softc *sc) { if (SC_PATH(sc)) { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); } else { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); } } /* try to acquire a leader lock for current engine */ static uint8_t bxe_trylock_leader_lock(struct bxe_softc *sc) { return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } static int bxe_release_leader_lock(struct bxe_softc *sc) { return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } /* close gates #2, #3 and #4 */ static void bxe_set_234_gates(struct bxe_softc *sc, uint8_t close) { uint32_t val; /* gates #2 and #4a are closed/opened for "not E1" only */ if (!CHIP_IS_E1(sc)) { /* #4 */ REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); /* #2 */ REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); } /* #3 */ if (CHIP_IS_E1x(sc)) { /* prevent interrupts from HC on both ports */ val = REG_RD(sc, HC_REG_CONFIG_1); REG_WR(sc, HC_REG_CONFIG_1, (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); val = REG_RD(sc, HC_REG_CONFIG_0); REG_WR(sc, HC_REG_CONFIG_0, (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { /* Prevent incomming interrupts in IGU */ val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, (!close) ? (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); wmb(); } /* poll for pending writes bit, it should get cleared in no more than 1s */ static int bxe_er_poll_igu_vq(struct bxe_softc *sc) { uint32_t cnt = 1000; uint32_t pend_bits = 0; do { pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); if (pend_bits == 0) { break; } DELAY(1000); } while (--cnt > 0); if (cnt == 0) { BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); return (-1); } return (0); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ static void bxe_clp_reset_prep(struct bxe_softc *sc, uint32_t *magic_val) { /* Do some magic... */ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); *magic_val = val & SHARED_MF_CLP_MAGIC; MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); } /* restore the value of the 'magic' bit */ static void bxe_clp_reset_done(struct bxe_softc *sc, uint32_t magic_val) { /* Restore the 'magic' bit value... 
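     * (i.e. write back whatever bxe_clp_reset_prep() saved into magic_val
     * while leaving the other clp_mb bits untouched)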
*/ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); MFCFG_WR(sc, shared_mf_config.clp_mb, (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); } /* prepare for MCP reset, takes care of CLP configurations */ static void bxe_reset_mcp_prep(struct bxe_softc *sc, uint32_t *magic_val) { uint32_t shmem; uint32_t validity_offset; /* set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_prep(sc, magic_val); } /* get shmem offset */ shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); validity_offset = offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); /* Clear validity map flags */ if (shmem > 0) { REG_WR(sc, shmem + validity_offset, 0); } } #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ #define MCP_ONE_TIMEOUT 100 /* 100 ms */ static void bxe_mcp_wait_one(struct bxe_softc *sc) { /* special handling for emulation and FPGA (10 times longer) */ if (CHIP_REV_IS_SLOW(sc)) { DELAY((MCP_ONE_TIMEOUT*10) * 1000); } else { DELAY((MCP_ONE_TIMEOUT) * 1000); } } /* initialize shmem_base and waits for validity signature to appear */ static int bxe_init_shmem(struct bxe_softc *sc) { int cnt = 0; uint32_t val = 0; do { sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); if (sc->devinfo.shmem_base) { val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if (val & SHR_MEM_VALIDITY_MB) return (0); } bxe_mcp_wait_one(sc); } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); BLOGE(sc, "BAD MCP validity signature\n"); return (-1); } static int bxe_reset_mcp_comp(struct bxe_softc *sc, uint32_t magic_val) { int rc = bxe_init_shmem(sc); /* Restore the `magic' bit value */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_done(sc, magic_val); } return (rc); } static void bxe_pxp_prep(struct bxe_softc *sc) { if (!CHIP_IS_E1(sc)) { REG_WR(sc, PXP2_REG_RD_START_INIT, 0); REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); wmb(); } } /* * Reset the whole chip except for: * - PCIE core * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) * - IGU * - MISC (including AEU) * - GRC * - RBCN, RBCP */ static void bxe_process_kill_chip_reset(struct bxe_softc *sc, uint8_t global) { uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; uint32_t global_bits2, stay_reset2; /* * Bits that have to be set in reset_mask2 if we want to reset 'global' * (per chip) blocks. */ global_bits2 = MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; /* * Don't reset the following blocks. * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be * reset, as in 4 port device they might still be owned * by the MCP (there is only one leader per path). 
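     *
     * The masks below are applied as (reset_maskN & ~not_reset_maskN)
     * writes to the MISC reset registers at the bottom of this function.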
*/ not_reset_mask1 = MISC_REGISTERS_RESET_REG_1_RST_HC | MISC_REGISTERS_RESET_REG_1_RST_PXPV | MISC_REGISTERS_RESET_REG_1_RST_PXP; not_reset_mask2 = MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | MISC_REGISTERS_RESET_REG_2_RST_RBCN | MISC_REGISTERS_RESET_REG_2_RST_GRC | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | MISC_REGISTERS_RESET_REG_2_RST_ATC | MISC_REGISTERS_RESET_REG_2_PGLC | MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; /* * Keep the following blocks in reset: * - all xxMACs are handled by the elink code. */ stay_reset2 = MISC_REGISTERS_RESET_REG_2_XMAC | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; /* Full reset masks according to the chip */ reset_mask1 = 0xffffffff; if (CHIP_IS_E1(sc)) reset_mask2 = 0xffff; else if (CHIP_IS_E1H(sc)) reset_mask2 = 0x1ffff; else if (CHIP_IS_E2(sc)) reset_mask2 = 0xfffff; else /* CHIP_IS_E3 */ reset_mask2 = 0x3ffffff; /* Don't reset global blocks unless we need to */ if (!global) reset_mask2 &= ~global_bits2; /* * In case of attention in the QM, we need to reset PXP * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM * because otherwise QM reset would release 'close the gates' shortly * before resetting the PXP, then the PSWRQ would send a write * request to PGLUE. Then when PXP is reset, PGLUE would try to * read the payload data from PSWWR, but PSWWR would not * respond. The write queue in PGLUE would stuck, dmae commands * would not return. Therefore it's important to reset the second * reset register (containing the * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM * bit). */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, reset_mask2 & (~not_reset_mask2)); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, reset_mask1 & (~not_reset_mask1)); mb(); wmb(); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2)); mb(); wmb(); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); wmb(); } static int bxe_process_kill(struct bxe_softc *sc, uint8_t global) { int cnt = 1000; uint32_t val = 0; uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; uint32_t tags_63_32 = 0; /* Empty the Tetris buffer, wait for 1s */ do { sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); if (CHIP_IS_E3(sc)) { tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); } if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && ((port_is_idle_0 & 0x1) == 0x1) && ((port_is_idle_1 & 0x1) == 0x1) && (pgl_exp_rom2 == 0xffffffff) && (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) break; DELAY(1000); } while (cnt-- > 0); if (cnt <= 0) { BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " "are still outstanding read requests after 1s! 
" "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return (-1); } mb(); /* Close gates #2, #3 and #4 */ bxe_set_234_gates(sc, TRUE); /* Poll for IGU VQs for 57712 and newer chips */ if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { return (-1); } /* XXX indicate that "process kill" is in progress to MCP */ /* clear "unprepared" bit */ REG_WR(sc, MISC_REG_UNPREPARED, 0); mb(); /* Make sure all is written to the chip before the reset */ wmb(); /* * Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ DELAY(1000); /* Prepare to chip reset: */ /* MCP */ if (global) { bxe_reset_mcp_prep(sc, &val); } /* PXP */ bxe_pxp_prep(sc); mb(); /* reset the chip */ bxe_process_kill_chip_reset(sc, global); mb(); /* clear errors in PGB */ if (!CHIP_IS_E1(sc)) REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); /* Recover after reset: */ /* MCP */ if (global && bxe_reset_mcp_comp(sc, val)) { return (-1); } /* XXX add resetting the NO_MCP mode DB here */ /* Open the gates #2, #3 and #4 */ bxe_set_234_gates(sc, FALSE); /* XXX * IGU/AEU preparation bring back the AEU/IGU to a reset state * re-enable attentions */ return (0); } static int bxe_leader_reset(struct bxe_softc *sc) { int rc = 0; uint8_t global = bxe_reset_is_global(sc); uint32_t load_code; /* * If not going to reset MCP, load "fake" driver to reset HW while * driver is owner of the HW. */ if (!global && !BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset; } if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { BLOGE(sc, "MCP unexpected response, aborting\n"); rc = -1; goto exit_leader_reset2; } load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset2; } } /* try to recover after the failure */ if (bxe_process_kill(sc, global)) { BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); rc = -1; goto exit_leader_reset2; } /* * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver * state. */ bxe_set_reset_done(sc); if (global) { bxe_clear_reset_global(sc); } exit_leader_reset2: /* unload "fake driver" if it was loaded */ if (!global && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } exit_leader_reset: sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); return (rc); } /* * prepare INIT transition, parameters configured: * - HC configuration * - Queue's CDU context */ static void bxe_pf_q_prep_init(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_queue_init_params *init_params) { uint8_t cos; int cxt_index, cxt_offset; bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); /* HC rate */ init_params->rx.hc_rate = sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; init_params->tx.hc_rate = sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; /* FW SB ID */ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; /* CQ index among the SB indices */ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; /* set maximum number of COSs supported by this queue */ init_params->max_cos = sc->max_cos; BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", fp->index, init_params->max_cos); /* set the context pointers queue object */ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { /* XXX change index/cid here if ever support multiple tx CoS */ /* fp->txdata[cos]->cid */ cxt_index = fp->index / ILT_PAGE_CIDS; cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; } } /* set flags that are common for the Tx-only and not normal connections */ static unsigned long bxe_get_common_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t zero_stats) { unsigned long flags = 0; /* PF driver will always initialize the Queue to an ACTIVE state */ bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); /* * tx only connections collect statistics (on the same index as the * parent connection). The statistics are zeroed when the parent * connection is initialized. */ bxe_set_bit(ECORE_Q_FLG_STATS, &flags); if (zero_stats) { bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); } /* * tx only connections can support tx-switching, though their * CoS-ness doesn't survive the loopback */ if (sc->flags & BXE_TX_SWITCHING) { bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); } bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); return (flags); } static unsigned long bxe_get_q_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { unsigned long flags = 0; if (IS_MF_SD(sc)) { bxe_set_bit(ECORE_Q_FLG_OV, &flags); } if (if_getcapenable(sc->ifp) & IFCAP_LRO) { bxe_set_bit(ECORE_Q_FLG_TPA, &flags); bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); } if (leading) { bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); } bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); /* merge with common flags */ return (flags | bxe_get_common_flags(sc, fp, TRUE)); } static void bxe_pf_q_prep_general(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_general_setup_params *gen_init, uint8_t cos) { gen_init->stat_id = bxe_stats_id(fp); gen_init->spcl_id = fp->cl_id; gen_init->mtu = sc->mtu; gen_init->cos = cos; } static void bxe_pf_rx_q_prep(struct bxe_softc *sc, struct bxe_fastpath *fp, struct rxq_pause_params *pause, struct ecore_rxq_setup_params *rxq_init) { uint8_t max_sge = 0; uint16_t sge_sz = 0; uint16_t tpa_agg_size = 0; pause->sge_th_lo = SGE_TH_LO(sc); pause->sge_th_hi = SGE_TH_HI(sc); /* validate SGE ring has enough to cross high threshold */ if (sc->dropless_fc && (pause->sge_th_hi + FW_PREFETCH_CNT) > (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { BLOGW(sc, "sge ring threshold limit\n"); } /* minimum max_aggregation_size is 2*MTU (two full buffers) */ tpa_agg_size = (2 * sc->mtu); if (tpa_agg_size < sc->max_aggregation_size) { tpa_agg_size = sc->max_aggregation_size; } max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); /* pause - not for e1 */ if (!CHIP_IS_E1(sc)) { pause->bd_th_lo = BD_TH_LO(sc); pause->bd_th_hi = BD_TH_HI(sc); pause->rcq_th_lo = RCQ_TH_LO(sc); pause->rcq_th_hi = RCQ_TH_HI(sc); /* validate rings have enough 
entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup */
    rxq_init->dscr_map   = fp->rx_dma.paddr;
    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map    = fp->rcq_dma.paddr;
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be the maximum number of data bytes that may be
     * placed on the BD (not including padding).
     */
    rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz      = tpa_agg_size;
    rxq_init->sge_buf_sz      = sge_sz;
    rxq_init->max_sges_pkt    = max_sge;
    rxq_init->rss_engine_id   = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number of simultaneous TPA aggregations for this queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal;
     * if multi function mode is afex, then mask the default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}

static void
bxe_pf_tx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct ecore_txq_setup_params *txq_init,
                 uint8_t                       cos)
{
    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
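     * Today there is a single CoS, so fp->tx_dma is used directly below.
     * A per-CoS layout would instead fetch the mapping via, e.g.,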
* fp->txdata[cos]->tx_dma.paddr; */ txq_init->dscr_map = fp->tx_dma.paddr; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; /* * set the TSS leading client id for TX classfication to the * leading RSS client id */ txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); } /* * This function performs 2 steps in a queue state machine: * 1) RESET->INIT * 2) INIT->SETUP */ static int bxe_setup_queue(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { struct ecore_queue_state_params q_params = { NULL }; struct ecore_queue_setup_params *setup_params = &q_params.params.setup; int rc; BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; /* we want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* prepare the INIT parameters */ bxe_pf_q_prep_init(sc, fp, &q_params.params.init); /* Set the command */ q_params.cmd = ECORE_Q_CMD_INIT; /* Change the state to INIT */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); return (rc); } BLOGD(sc, DBG_LOAD, "init complete\n"); /* now move the Queue to the SETUP state */ memset(setup_params, 0, sizeof(*setup_params)); /* set Queue flags */ setup_params->flags = bxe_get_q_flags(sc, fp, leading); /* set general SETUP parameters */ bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, FIRST_TX_COS_INDEX); bxe_pf_rx_q_prep(sc, fp, &setup_params->pause_params, &setup_params->rxq_params); bxe_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); /* Set the command */ q_params.cmd = ECORE_Q_CMD_SETUP; /* change the state to SETUP */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); return (rc); } return (rc); } static int bxe_setup_leading(struct bxe_softc *sc) { return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); } static int bxe_config_rss_pf(struct bxe_softc *sc, struct ecore_rss_config_obj *rss_obj, uint8_t config_hash) { struct ecore_config_rss_params params = { NULL }; int i; /* * Although RSS is meaningless when there is a single HW queue we * still need it enabled in order to have HW Rx hash generated. 
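     *
     * The parameters below select the hash inputs (IPv4/IPv6 with TCP and,
     * optionally, UDP flavors), copy in the indirection table from the
     * rss_obj, and, when config_hash is set, program a fresh random search
     * key via arc4random().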
     */
    params.rss_obj = rss_obj;

    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);

    /* RSS configuration */
    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
    if (rss_obj->udp_rss_v4) {
        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
    }
    if (rss_obj->udp_rss_v6) {
        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
    }

    /* Hash bits */
    params.rss_result_mask = MULTI_MASK;

    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

    if (config_hash) {
        /* RSS keys */
        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
            params.rss_key[i] = arc4random();
        }

        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
    }

    return (ecore_config_rss(sc, &params));
}

static int
bxe_config_rss_eth(struct bxe_softc *sc,
                   uint8_t          config_hash)
{
    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
}

static int
bxe_init_rss_pf(struct bxe_softc *sc)
{
    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
    int i;

    /*
     * Prepare the initial contents of the indirection table if
     * RSS is enabled
     */
    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
        sc->rss_conf_obj.ind_table[i] =
            (sc->fp->cl_id + (i % num_eth_queues));
    }

    if (sc->udp_rss) {
        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
    }

    /*
     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
     * per-port, so if explicit configuration is needed, do it only
     * for a PMF.
     *
     * For 57712 and newer it's a per-function configuration.
     */
    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
}

static int
bxe_set_mac_one(struct bxe_softc          *sc,
                uint8_t                   *mac,
                struct ecore_vlan_mac_obj *obj,
                uint8_t                   set,
                int                       mac_type,
                unsigned long             *ramrod_flags)
{
    struct ecore_vlan_mac_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* fill in general parameters */
    ramrod_param.vlan_mac_obj = obj;
    ramrod_param.ramrod_flags = *ramrod_flags;

    /* fill a user request section if needed */
    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

        /* Set the command: ADD or DEL */
        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
                                            ECORE_VLAN_MAC_DEL;
    }

    rc = ecore_config_vlan_mac(sc, &ramrod_param);

    if (rc == ECORE_EXISTS) {
        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
        /* do not treat adding same MAC as error */
        rc = 0;
    } else if (rc < 0) {
        BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); } return (rc); } static int bxe_set_eth_mac(struct bxe_softc *sc, uint8_t set) { unsigned long ramrod_flags = 0; BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ return (bxe_set_mac_one(sc, sc->link_params.mac_addr, &sc->sp_objs->mac_obj, set, ECORE_ETH_MAC, &ramrod_flags)); } static int bxe_get_cur_phy_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = 0; if (sc->link_params.num_phys <= 1) { return (ELINK_INT_PHY); } if (sc->link_vars.link_up) { sel_phy_idx = ELINK_EXT_PHY1; /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && (sc->link_params.phy[ELINK_EXT_PHY2].supported & ELINK_SUPPORTED_FIBRE)) sel_phy_idx = ELINK_EXT_PHY2; } else { switch (elink_phy_selection(&sc->link_params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY2; break; } } return (sel_phy_idx); } static int bxe_get_link_cfg_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); /* * The selected activated PHY is always after swapping (in case PHY * swapping is enabled). So when swapping is enabled, we need to reverse * the configuration */ if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { if (sel_phy_idx == ELINK_EXT_PHY1) sel_phy_idx = ELINK_EXT_PHY2; else if (sel_phy_idx == ELINK_EXT_PHY2) sel_phy_idx = ELINK_EXT_PHY1; } return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); } static void bxe_set_requested_fc(struct bxe_softc *sc) { /* * Initialize link parameters structure variables * It is recommended to turn off RX FC for jumbo frames * for better performance */ if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; } else { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; } } static void bxe_calc_fc_adv(struct bxe_softc *sc) { uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); switch (sc->link_vars.ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: default: sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; break; } } static uint16_t bxe_get_mf_speed(struct bxe_softc *sc) { uint16_t line_speed = sc->link_vars.line_speed; if (IS_MF(sc)) { uint16_t maxCfg = bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); /* calculate the current MAX line speed limit for the MF devices */ if (IS_MF_SI(sc)) { line_speed = (line_speed * maxCfg) / 100; } else { /* SD mode */ uint16_t vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) { line_speed = vn_max_rate; } } } return (line_speed); } static void bxe_fill_report_data(struct bxe_softc *sc, struct bxe_link_report_data *data) { uint16_t line_speed = bxe_get_mf_speed(sc); memset(data, 0, sizeof(*data)); /* fill the report data with the effective line speed */ data->line_speed = line_speed; /* Link is down */ if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, 
&data->link_report_flags); } /* Full DUPLEX */ if (sc->link_vars.duplex == DUPLEX_FULL) { bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); } /* Rx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); } /* Tx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); } } /* report link status to OS, should be called under phy_lock */ static void bxe_link_report_locked(struct bxe_softc *sc) { struct bxe_link_report_data cur_data; /* reread mf_cfg */ if (IS_PF(sc) && !CHIP_IS_E1(sc)) { bxe_read_mf_cfg(sc); } /* Read the current link report info */ bxe_fill_report_data(sc, &cur_data); /* Don't report link down or exactly the same link status twice */ if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &sc->last_reported_link.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags))) { return; } sc->link_cnt++; /* report new link params and remember the state for the next time */ memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags)) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); BLOGI(sc, "NIC Link is Down\n"); } else { const char *duplex; const char *flow; if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, &cur_data.link_report_flags)) { duplex = "full"; } else { duplex = "half"; } /* * Handle the FC at the end so that only these flags would be * possibly set. This way we may easily check if there is no FC * enabled. */ if (cur_data.link_report_flags) { if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive & transmit"; } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive"; } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - transmit"; } else { flow = "none"; /* possible? 
*/ } } else { flow = "none"; } if_link_state_change(sc->ifp, LINK_STATE_UP); BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", cur_data.line_speed, duplex, flow); } } static void bxe_link_report(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); bxe_link_report_locked(sc); bxe_release_phy_lock(sc); } static void bxe_link_status_update(struct bxe_softc *sc) { if (sc->state != BXE_STATE_OPEN) { return; } if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { elink_link_status_update(&sc->link_params, &sc->link_vars); } else { sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full | ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full | ELINK_SUPPORTED_1000baseT_Full | ELINK_SUPPORTED_2500baseX_Full | ELINK_SUPPORTED_10000baseT_Full | ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE | ELINK_SUPPORTED_Autoneg | ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause); sc->port.advertising[0] = sc->port.supported[0]; sc->link_params.sc = sc; sc->link_params.port = SC_PORT(sc); sc->link_params.req_duplex[0] = DUPLEX_FULL; sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; sc->link_params.req_line_speed[0] = SPEED_10000; sc->link_params.speed_cap_mask[0] = 0x7f0000; sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; if (CHIP_REV_IS_FPGA(sc)) { sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; sc->link_vars.line_speed = ELINK_SPEED_1000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); } else { sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; sc->link_vars.line_speed = ELINK_SPEED_10000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); } sc->link_vars.link_up = 1; sc->link_vars.duplex = DUPLEX_FULL; sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; if (IS_PF(sc)) { REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } else { bxe_stats_handle(sc, STATS_EVENT_STOP); } bxe_link_report(sc); } else { bxe_link_report(sc); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } } static int bxe_initial_phy_init(struct bxe_softc *sc, int load_mode) { int rc, cfg_idx = bxe_get_link_cfg_idx(sc); uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; struct elink_params *lp = &sc->link_params; bxe_set_requested_fc(sc); if (CHIP_REV_IS_SLOW(sc)) { uint32_t bond = CHIP_BOND_ID(sc); uint32_t feat = 0; if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } else if (bond & 0x4) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } } else if (bond & 0x8) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } } /* disable EMAC for E3 and above */ if (bond & 0x2) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } sc->link_params.feature_config_flags |= feat; } bxe_acquire_phy_lock(sc); if (load_mode == LOAD_DIAG) { lp->loopback_mode = ELINK_LOOPBACK_XGXS; /* Prefer doing PHY loopback at 10G speed, if possible */ if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { if (lp->speed_cap_mask[cfg_idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; } else { lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; } } } if (load_mode == LOAD_LOOPBACK_EXT) { lp->loopback_mode = ELINK_LOOPBACK_EXT; } rc = 
elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } if (!CHIP_REV_IS_SLOW(sc)) { bxe_periodic_start(sc); } sc->link_params.req_line_speed[cfg_idx] = req_line_speed; return (rc); } /* must be called under IF_ADDR_LOCK */ static int bxe_set_mc_list(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam = { NULL }; int rc = 0; int mc_count = 0; int mcnt, i; struct ecore_mcast_list_elem *mc_mac, *mc_mac_start; unsigned char *mta; if_t ifp = sc->ifp; mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */ if (!mc_count) return (0); mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count, M_DEVBUF, M_NOWAIT); if(mta == NULL) { BLOGE(sc, "Failed to allocate temp mcast list\n"); return (-1); } bzero(mta, (sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count)); mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); mc_mac_start = mc_mac; if (!mc_mac) { free(mta, M_DEVBUF); BLOGE(sc, "Failed to allocate temp mcast list\n"); return (-1); } bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); /* mta and mcnt not expected to be different */ if_multiaddr_array(ifp, mta, &mcnt, mc_count); rparam.mcast_obj = &sc->mcast_obj; ECORE_LIST_INIT(&rparam.mcast_list); for(i=0; i< mcnt; i++) { mc_mac->mac = (uint8_t *)(mta + (i * ETHER_ADDR_LEN)); ECORE_LIST_PUSH_TAIL(&mc_mac->link, &rparam.mcast_list); BLOGD(sc, DBG_LOAD, "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); mc_mac++; } rparam.mcast_list_len = mc_count; BXE_MCAST_LOCK(sc); /* first, clear all configured multicast MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); BXE_MCAST_UNLOCK(sc); free(mc_mac_start, M_DEVBUF); free(mta, M_DEVBUF); return (rc); } /* Now add the new MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); if (rc < 0) { BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); } BXE_MCAST_UNLOCK(sc); free(mc_mac_start, M_DEVBUF); free(mta, M_DEVBUF); return (rc); } static int bxe_set_uc_list(struct bxe_softc *sc) { if_t ifp = sc->ifp; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; struct ifaddr *ifa; unsigned long ramrod_flags = 0; int rc; #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_addr_rlock(ifp); #endif /* first schedule a cleanup up of old configuration */ rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_addr_runlock(ifp); #endif return (rc); } ifa = if_getifaddr(ifp); /* XXX Is this structure */ while (ifa) { if (ifa->ifa_addr->sa_family != AF_LINK) { ifa = TAILQ_NEXT(ifa, ifa_link); continue; } rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); if (rc == -EEXIST) { BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); /* do not treat adding same MAC as an error */ rc = 0; } else if (rc < 0) { BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_addr_runlock(ifp); #endif return (rc); } ifa = TAILQ_NEXT(ifa, ifa_link); } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_addr_runlock(ifp); #endif /* Execute 
the pending commands */ bit_set(&ramrod_flags, RAMROD_CONT); return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, ECORE_UC_LIST_MAC, &ramrod_flags)); } static void bxe_set_rx_mode(struct bxe_softc *sc) { if_t ifp = sc->ifp; uint32_t rx_mode = BXE_RX_MODE_NORMAL; if (sc->state != BXE_STATE_OPEN) { BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); return; } BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); if (if_getflags(ifp) & IFF_PROMISC) { rx_mode = BXE_RX_MODE_PROMISC; } else if ((if_getflags(ifp) & IFF_ALLMULTI) || ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && CHIP_IS_E1(sc))) { rx_mode = BXE_RX_MODE_ALLMULTI; } else { if (IS_PF(sc)) { /* some multicasts */ if (bxe_set_mc_list(sc) < 0) { rx_mode = BXE_RX_MODE_ALLMULTI; } if (bxe_set_uc_list(sc) < 0) { rx_mode = BXE_RX_MODE_PROMISC; } } } sc->rx_mode = rx_mode; /* schedule the rx_mode command */ if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); return; } if (IS_PF(sc)) { bxe_set_storm_rx_mode(sc); } } /* update flags in shmem */ static void bxe_update_drv_flags(struct bxe_softc *sc, uint32_t flags, uint32_t set) { uint32_t drv_flags; if (SHMEM2_HAS(sc, drv_flags)) { bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); drv_flags = SHMEM2_RD(sc, drv_flags); if (set) { SET_FLAGS(drv_flags, flags); } else { RESET_FLAGS(drv_flags, flags); } SHMEM2_WR(sc, drv_flags, drv_flags); BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); } } /* periodic timer callout routine, only runs when the interface is up */ static void bxe_periodic_callout_func(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; struct bxe_fastpath *fp; uint16_t tx_bd_avail; int i; if (!BXE_CORE_TRYLOCK(sc)) { /* just bail and try again next time */ if ((sc->state == BXE_STATE_OPEN) && (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } return; } if ((sc->state != BXE_STATE_OPEN) || (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); BXE_CORE_UNLOCK(sc); return; } #if __FreeBSD_version >= 800000 FOR_EACH_QUEUE(sc, i) { fp = &sc->fp[i]; if (BXE_FP_TX_TRYLOCK(fp)) { if_t ifp = sc->ifp; /* * If interface was stopped due to unavailable * bds, try to process some tx completions */ (void) bxe_txeof(sc, fp); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) { bxe_tx_mq_start_locked(sc, ifp, fp, NULL); } BXE_FP_TX_UNLOCK(fp); } } #else fp = &sc->fp[0]; if (BXE_FP_TX_TRYLOCK(fp)) { struct ifnet *ifp = sc->ifnet; /* * If interface was stopped due to unavailable * bds, try to process some tx completions */ (void) bxe_txeof(sc, fp); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) { bxe_tx_start_locked(sc, ifp, fp); } BXE_FP_TX_UNLOCK(fp); } #endif /* #if __FreeBSD_version >= 800000 */ /* Check for TX timeouts on any fastpath. */ FOR_EACH_QUEUE(sc, i) { if (bxe_watchdog(sc, &sc->fp[i]) != 0) { /* Ruh-Roh, chip was reset! */ break; } } if (!CHIP_REV_IS_SLOW(sc)) { /* * This barrier is needed to ensure the ordering between the writing * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and * the reading here. 
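     *
     * mb() below provides that full barrier before sc->port.pmf is read.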
*/ mb(); if (sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_period_func(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } } if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t drv_pulse; uint32_t mcp_pulse; ++sc->fw_drv_pulse_wr_seq; sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; drv_pulse = sc->fw_drv_pulse_wr_seq; bxe_drv_pulse(sc); mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); /* * The delta between driver pulse and mcp response should * be 1 (before mcp response) or 0 (after mcp response). */ if ((drv_pulse != mcp_pulse) && (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { /* someone lost a heartbeat... */ BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse); } } /* state is BXE_STATE_OPEN */ bxe_stats_handle(sc, STATS_EVENT_UPDATE); BXE_CORE_UNLOCK(sc); if ((sc->state == BXE_STATE_OPEN) && (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } } static void bxe_periodic_start(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } static void bxe_periodic_stop(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); callout_drain(&sc->periodic_callout); } /* start the controller */ static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode) { uint32_t val; int load_code = 0; int i, rc = 0; BXE_CORE_LOCK_ASSERT(sc); BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); sc->state = BXE_STATE_OPENING_WAITING_LOAD; if (IS_PF(sc)) { /* must be called before memory allocation and HW init */ bxe_ilt_set_info(sc); } sc->last_reported_link_state = LINK_STATE_UNKNOWN; bxe_set_fp_rx_buf_size(sc); if (bxe_alloc_fp_buffers(sc) != 0) { BLOGE(sc, "Failed to allocate fastpath memory\n"); sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_fw_stats_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (IS_PF(sc)) { /* set pf load just before approaching the MCP */ bxe_set_pf_load(sc); /* if MCP exists send load request and analyze response */ if (!BXE_NOMCP(sc)) { /* attempt to load pf */ if (bxe_nic_load_request(sc, &load_code) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error1; } /* what did the MCP say? 
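             * Typically one of the FW_MSG_CODE_DRV_LOAD_* codes, which
             * tell the driver whether it is the first function up on the
             * chip or port and hence how much common hardware
             * initialization it owns.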
*/ if (bxe_nic_load_analyze_req(sc, load_code) != 0) { bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } else { BLOGI(sc, "Device has no MCP!\n"); load_code = bxe_nic_load_no_mcp(sc); } /* mark PMF if applicable */ bxe_nic_load_pmf(sc, load_code); /* Init Function state controlling object */ bxe_init_func_obj(sc); /* Initialize HW */ if (bxe_init_hw(sc, load_code) != 0) { BLOGE(sc, "HW init failed\n"); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); sc->flags |= BXE_NO_PULSE; /* attach interrupts */ if (bxe_interrupt_attach(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } bxe_nic_init(sc, load_code); /* Init per-function objects */ if (IS_PF(sc)) { bxe_init_objs(sc); // XXX bxe_iov_nic_init(sc); /* set AFEX default VLAN tag to an invalid value */ sc->devinfo.mf_info.afex_def_vlan_tag = -1; // XXX bxe_nic_load_afex_dcc(sc, load_code); sc->state = BXE_STATE_OPENING_WAITING_PORT; rc = bxe_func_start(sc); if (rc) { BLOGE(sc, "Function start failed! rc = %d\n", rc); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } /* send LOAD_DONE command to MCP */ if (!BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); sc->state = BXE_STATE_ERROR; rc = ENXIO; goto bxe_nic_load_error3; } } rc = bxe_setup_leading(sc); if (rc) { BLOGE(sc, "Setup leading failed! rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); if (rc) { BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } rc = bxe_init_rss_pf(sc); if (rc) { BLOGE(sc, "PF RSS init failed\n"); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } /* XXX VF */ /* now when Clients are configured we are ready to work */ sc->state = BXE_STATE_OPEN; /* Configure a ucast MAC */ if (IS_PF(sc)) { rc = bxe_set_eth_mac(sc, TRUE); } if (rc) { BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } if (sc->port.pmf) { rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); if (rc) { sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; /* start fast path */ /* Initialize Rx filter */ bxe_set_rx_mode(sc); /* start the Tx */ switch (/* XXX load_mode */LOAD_OPEN) { case LOAD_NORMAL: case LOAD_OPEN: break; case LOAD_DIAG: case LOAD_LOOPBACK_EXT: sc->state = BXE_STATE_DIAG; break; default: break; } if (sc->port.pmf) { bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); } else { bxe_link_status_update(sc); } /* start the periodic timer callout */ bxe_periodic_start(sc); if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { /* mark driver is loaded in shmem2 */ val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], (val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2)); } /* wait for all pending SP commands to complete */ if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); bxe_periodic_stop(sc); 
bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); return (ENXIO); } /* Tell the stack the driver is running! */ if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); return (0); bxe_nic_load_error3: if (IS_PF(sc)) { bxe_int_disable_sync(sc, 1); /* clean out queued objects */ bxe_squeeze_objects(sc); } bxe_interrupt_detach(sc); bxe_nic_load_error2: if (IS_PF(sc) && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } sc->port.pmf = 0; bxe_nic_load_error1: /* clear pf_load status, as it was already set */ if (IS_PF(sc)) { bxe_clear_pf_load(sc); } bxe_nic_load_error0: bxe_free_fw_stats_mem(sc); bxe_free_fp_buffers(sc); bxe_free_mem(sc); return (rc); } static int bxe_init_locked(struct bxe_softc *sc) { int other_engine = SC_PATH(sc) ? 0 : 1; uint8_t other_load_status, load_status; uint8_t global = FALSE; int rc; BXE_CORE_LOCK_ASSERT(sc); /* check if the driver is already running */ if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); return (0); } bxe_set_power_state(sc, PCI_PM_D0); /* * If parity occurred during the unload, then attentions and/or * RECOVERY_IN_PROGRES may still be set. If so we want the first function * loaded on the current engine to complete the recovery. Parity recovery * is only relevant for PF driver. */ if (IS_PF(sc)) { other_load_status = bxe_get_load_status(sc, other_engine); load_status = bxe_get_load_status(sc, SC_PATH(sc)); if (!bxe_reset_is_done(sc, SC_PATH(sc)) || bxe_chk_parity_attn(sc, &global, TRUE)) { do { /* * If there are attentions and they are in global blocks, set * the GLOBAL_RESET bit regardless whether it will be this * function that will complete the recovery or not. */ if (global) { bxe_set_reset_global(sc); } /* * Only the first function on the current engine should try * to recover in open. In case of attentions in global blocks * only the first in the chip should try to recover. */ if ((!load_status && (!global || !other_load_status)) && bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { BLOGI(sc, "Recovered during init\n"); break; } /* recovery has failed... */ bxe_set_power_state(sc, PCI_PM_D3hot); sc->recovery_state = BXE_RECOVERY_FAILED; BLOGE(sc, "Recovery flow hasn't properly " "completed yet, try again later. " "If you still see this message after a " "few retries then power cycle is required.\n"); rc = ENXIO; goto bxe_init_locked_done; } while (0); } } sc->recovery_state = BXE_RECOVERY_DONE; rc = bxe_nic_load(sc, LOAD_OPEN); bxe_init_locked_done: if (rc) { /* Tell the stack the driver is NOT running! */ BLOGE(sc, "Initialization failed, " "stack notified driver is NOT running!\n"); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); } return (rc); } static int bxe_stop_locked(struct bxe_softc *sc) { BXE_CORE_LOCK_ASSERT(sc); return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); } /* * Handles controller initialization when called from an unlocked routine. * ifconfig calls this function. 
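 * For example, `ifconfig bxe0 up' lands here through the if_init hook
 * registered in bxe_init_ifnet(); the real work is done by
 * bxe_init_locked() under BXE_CORE_LOCK.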
* * Returns: * void */ static void bxe_init(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BXE_CORE_LOCK(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } static int bxe_init_ifnet(struct bxe_softc *sc) { if_t ifp; int capabilities; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&sc->ifmedia, IFM_IMASK, bxe_ifmedia_update, bxe_ifmedia_status); /* set the default interface values */ ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */ /* allocate the ifnet structure */ if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { BLOGE(sc, "Interface allocation failed!\n"); return (ENXIO); } if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, bxe_ioctl); if_setstartfn(ifp, bxe_tx_start); if_setgetcounterfn(ifp, bxe_get_counter); #if __FreeBSD_version >= 800000 if_settransmitfn(ifp, bxe_tx_mq_start); if_setqflushfn(ifp, bxe_mq_flush); #endif #ifdef FreeBSD8_0 if_settimer(ifp, 0); #endif if_setinitfn(ifp, bxe_init); if_setmtu(ifp, sc->mtu); if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); capabilities = #if __FreeBSD_version < 700000 (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO); #else (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_WOL_MAGIC); #endif if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ if_setbaudrate(ifp, IF_Gbps(10)); /* XXX */ if_setsendqlen(ifp, sc->tx_ring_size); if_setsendqready(ifp); /* XXX */ sc->ifp = ifp; /* attach to the Ethernet interface list */ ether_ifattach(ifp, sc->link_params.mac_addr); return (0); } static void bxe_deallocate_bars(struct bxe_softc *sc) { int i; for (i = 0; i < MAX_BARS; i++) { if (sc->bar[i].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->bar[i].rid, sc->bar[i].resource); BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", i, PCIR_BAR(i)); } } } static int bxe_allocate_bars(struct bxe_softc *sc) { u_int flags; int i; memset(sc->bar, 0, sizeof(sc->bar)); for (i = 0; i < MAX_BARS; i++) { /* memory resources reside at BARs 0, 2, 4 */ /* Run `pciconf -lb` to see mappings */ if ((i != 0) && (i != 2) && (i != 4)) { continue; } sc->bar[i].rid = PCIR_BAR(i); flags = RF_ACTIVE; if (i == 0) { flags |= RF_SHAREABLE; } if ((sc->bar[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->bar[i].rid, flags)) == NULL) { return (0); } sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%jd) -> %p\n", i, PCIR_BAR(i), (void *)rman_get_start(sc->bar[i].resource), (void *)rman_get_end(sc->bar[i].resource), rman_get_size(sc->bar[i].resource), (void *)sc->bar[i].kva); } return (0); } static void bxe_get_function_num(struct bxe_softc *sc) { uint32_t val = 0; /* * Read the ME register to get the function number. The ME register * holds the relative-function number and absolute-function number. The * absolute-function number appears only in E2 and above. 
Before that
     * these bits always contained zero, therefore we cannot blindly use them.
     */

    val = REG_RD(sc, BAR_ME_REGISTER);

    sc->pfunc_rel =
        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
    sc->path_id =
        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;

    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
    } else {
        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
    }

    BLOGD(sc, DBG_LOAD,
          "Relative function %d, Absolute function %d, Path %d\n",
          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}

static uint32_t
bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
{
    uint32_t shmem2_size;
    uint32_t offset;
    uint32_t mf_cfg_offset_value;

    /* Non 57712 */
    offset = (SHMEM_RD(sc, func_mb) +
              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));

    /* 57712 plus */
    if (sc->devinfo.shmem2_base != 0) {
        shmem2_size = SHMEM2_RD(sc, size);
        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
                offset = mf_cfg_offset_value;
            }
        }
    }

    return (offset);
}

static uint32_t
bxe_pcie_capability_read(struct bxe_softc *sc,
                         int              reg,
                         int              width)
{
    int pcie_reg;

    /* ensure PCIe capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
        if (pcie_reg != 0) {
            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
        }
    }

    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");

    return (0);
}

static uint8_t
bxe_is_pcie_pending(struct bxe_softc *sc)
{
    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
            PCIM_EXP_STA_TRANSACTION_PND);
}

/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
 * best to walk the list rather than make assumptions.
 */
static void
bxe_probe_pci_caps(struct bxe_softc *sc)
{
    uint16_t link_status;
    int reg;

    /* check if PCI Power Management is enabled */
    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
        }
    }

    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);

    /* handle PCIe 2.0 workarounds for 57710 */
    if (CHIP_IS_E1(sc)) {
        /* workaround for 57710 errata E4_57710_27462 */
        sc->devinfo.pcie_link_speed =
            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;

        /* workaround for 57710 errata E4_57710_27488 */
        sc->devinfo.pcie_link_width =
            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
        if (sc->devinfo.pcie_link_speed > 1) {
            sc->devinfo.pcie_link_width =
                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
        }
    } else {
        sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED);
        sc->devinfo.pcie_link_width =
            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
    }

    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);

    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;

    /* check if MSI capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
        }
    }

    /* check if MSI-X capability is enabled */
    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
        if (reg != 0) {
            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);

            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
        }
    }
}

static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /* get the outer vlan if we're in switch-dependent mode */

    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    mf_info->ext_id = (uint16_t)val;

    mf_info->multi_vnics_mode = 1;

    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
        return (1);
    }

    /* get the capabilities */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        FUNC_MF_CFG_PROTOCOL_ISCSI) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
               FUNC_MF_CFG_PROTOCOL_FCOE) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    } else {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    }

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}

static uint32_t
bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
{
    uint32_t retval = 0;
    uint32_t val;

    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
            retval |= MF_PROTO_SUPPORT_ETHERNET;
        }
        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
            retval |= MF_PROTO_SUPPORT_ISCSI;
        }
        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
            retval |= MF_PROTO_SUPPORT_FCOE;
        }
    }

    return (retval);
}

static int
bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /*
     * There is no outer vlan if we're in switch-independent mode.
     * If the mac is valid then assume multi-function.
     */

    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);

    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);

    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);

    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
2 : 4; return (0); } static int bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t e1hov_tag; uint32_t func_config; uint32_t niv_config; mf_info->multi_vnics_mode = 1; e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); mf_info->ext_id = (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> FUNC_MF_CFG_E1HOV_TAG_SHIFT); mf_info->default_vlan = (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> FUNC_MF_CFG_AFEX_VLAN_SHIFT); mf_info->niv_allowed_priorities = (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); mf_info->niv_default_cos = (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); mf_info->afex_vlan_mode = ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); mf_info->niv_mba_enabled = ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static int bxe_check_valid_mf_cfg(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t mf_cfg1; uint32_t mf_cfg2; uint32_t ovlan1; uint32_t ovlan2; uint8_t i, j; BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", SC_PORT(sc)); BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", mf_info->mf_config[SC_VN(sc)]); BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", mf_info->multi_vnics_mode); BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", mf_info->vnics_per_port); BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", mf_info->ext_id); BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", mf_info->min_bw[0], mf_info->min_bw[1], mf_info->min_bw[2], mf_info->min_bw[3]); BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", mf_info->max_bw[0], mf_info->max_bw[1], mf_info->max_bw[2], mf_info->max_bw[3]); BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", sc->mac_addr_str); /* various MF mode sanity checks... */ if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { BLOGE(sc, "Enumerated function %d is marked as hidden\n", SC_PORT(sc)); return (1); } if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", mf_info->vnics_per_port, mf_info->multi_vnics_mode); return (1); } if (mf_info->mf_mode == MULTI_FUNCTION_SD) { /* vnic id > 0 must have valid ovlan in switch-dependent mode */ if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", SC_VN(sc), OVLAN(sc)); return (1); } if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", mf_info->multi_vnics_mode, OVLAN(sc)); return (1); } /* * Verify all functions are either MF or SF mode. If MF, make sure * sure that all non-hidden functions have a valid ovlan. If SF, * make sure that all non-hidden functions have an invalid ovlan. 
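         *
         * The two loops below implement this: the first checks each
         * function's ovlan validity against multi_vnics_mode, the second
         * compares every pair of functions on the port and rejects
         * duplicate ovlans.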
*/ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { BLOGE(sc, "mf_mode=SD function %d MF config " "mismatch, multi_vnics_mode=%d ovlan=%d\n", i, mf_info->multi_vnics_mode, ovlan1); return (1); } } /* Verify all funcs on the same port each have a different ovlan. */ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); /* iterate from the next function on the port to the max func */ for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan1) && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan2) && (ovlan1 == ovlan2)) { BLOGE(sc, "mf_mode=SD functions %d and %d " "have the same ovlan (%d)\n", i, j, ovlan1); return (1); } } } } /* MULTI_FUNCTION_SD */ return (0); } static int bxe_get_mf_cfg_info(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val, mac_upper; uint8_t i, vnic; /* initialize mf_info defaults */ mf_info->vnics_per_port = 1; mf_info->multi_vnics_mode = FALSE; mf_info->path_has_ovlan = FALSE; mf_info->mf_mode = SINGLE_FUNCTION; if (!CHIP_IS_MF_CAP(sc)) { return (0); } if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { BLOGE(sc, "Invalid mf_cfg_base!\n"); return (1); } /* get the MF mode (switch dependent / independent / single-function) */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); /* check for legal upper mac bytes */ if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SI; } else { BLOGE(sc, "Invalid config for Switch Independent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: /* get outer vlan configuration */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SD; } else { BLOGE(sc, "Invalid config for Switch Dependent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ return (0); case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: /* * Mark MF mode as NIV if MCP version includes NPAR-SD support * and the MAC address is valid. */ mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); if ((SHMEM2_HAS(sc, afex_driver_support)) && (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { mf_info->mf_mode = MULTI_FUNCTION_AFEX; } else { BLOGE(sc, "Invalid config for AFEX mode\n"); } break; default: BLOGE(sc, "Unknown MF mode (0x%08x)\n", (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); return (1); } /* set path mf_mode (which could be different than function mf_mode) */ if (mf_info->mf_mode == MULTI_FUNCTION_SD) { mf_info->path_has_ovlan = TRUE; } else if (mf_info->mf_mode == SINGLE_FUNCTION) { /* * Decide on path multi vnics mode. 
If we're not in MF mode and in * 4-port mode, this is good enough to check vnic-0 of the other port * on the same path */ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { uint8_t other_port = !(PORT_ID(sc) & 1); uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0; } } if (mf_info->mf_mode == SINGLE_FUNCTION) { /* invalid MF config */ if (SC_VN(sc) >= 1) { BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); return (1); } return (0); } /* get the MF configuration */ mf_info->mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); switch(mf_info->mf_mode) { case MULTI_FUNCTION_SD: bxe_get_shmem_mf_cfg_info_sd(sc); break; case MULTI_FUNCTION_SI: bxe_get_shmem_mf_cfg_info_si(sc); break; case MULTI_FUNCTION_AFEX: bxe_get_shmem_mf_cfg_info_niv(sc); break; default: BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", mf_info->mf_mode); return (1); } /* get the congestion management parameters */ vnic = 0; FOREACH_ABS_FUNC_IN_PORT(sc, i) { /* get min/max bw */ val = MFCFG_RD(sc, func_mf_config[i].config); mf_info->min_bw[vnic] = ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); mf_info->max_bw[vnic] = ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); vnic++; } return (bxe_check_valid_mf_cfg(sc)); } static int bxe_get_shmem_info(struct bxe_softc *sc) { int port; uint32_t mac_hi, mac_lo, val; port = SC_PORT(sc); mac_hi = mac_lo = 0; sc->link_params.sc = sc; sc->link_params.port = port; /* get the hardware config info */ sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); sc->devinfo.hw_config2 = SHMEM_RD(sc, dev_info.shared_hw_config.config2); sc->link_params.hw_led_mode = ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT); /* get the port feature config */ sc->port.config = SHMEM_RD(sc, dev_info.port_feature_config[port].config), /* get the link params */ sc->link_params.speed_cap_mask[0] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); sc->link_params.speed_cap_mask[1] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); /* get the lane config */ sc->link_params.lane_config = SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); /* get the link config */ val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); sc->port.link_config[ELINK_INT_PHY] = val; sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); sc->port.link_config[ELINK_EXT_PHY1] = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); /* get the override preemphasis flag and enable it or turn it off */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } else { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } /* get the initial value of the link params */ sc->link_params.multi_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); /* get external phy info */ sc->port.ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); /* get the multifunction configuration */ bxe_get_mf_cfg_info(sc); /* get the mac address */ if (IS_MF(sc)) { mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); } else { mac_hi = 
SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); } if ((mac_lo == 0) && (mac_hi == 0)) { *sc->mac_addr_str = 0; BLOGE(sc, "No Ethernet address programmed!\n"); } else { sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), "%02x:%02x:%02x:%02x:%02x:%02x", sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); } return (0); } static void bxe_get_tunable_params(struct bxe_softc *sc) { /* sanity checks */ if ((bxe_interrupt_mode != INTR_MODE_INTX) && (bxe_interrupt_mode != INTR_MODE_MSI) && (bxe_interrupt_mode != INTR_MODE_MSIX)) { BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); bxe_interrupt_mode = INTR_MODE_MSIX; } if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); bxe_queue_count = 0; } if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { if (bxe_max_rx_bufs == 0) { bxe_max_rx_bufs = RX_BD_USABLE; } else { BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); bxe_max_rx_bufs = 2048; } } if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); bxe_hc_rx_ticks = 25; } if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); bxe_hc_tx_ticks = 50; } if (bxe_max_aggregation_size == 0) { bxe_max_aggregation_size = TPA_AGG_SIZE; } if (bxe_max_aggregation_size > 0xffff) { BLOGW(sc, "invalid max_aggregation_size (%d)\n", bxe_max_aggregation_size); bxe_max_aggregation_size = TPA_AGG_SIZE; } if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); bxe_mrrs = -1; } if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); bxe_autogreeen = 0; } if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); bxe_udp_rss = 0; } /* pull in user settings */ sc->interrupt_mode = bxe_interrupt_mode; sc->max_rx_bufs = bxe_max_rx_bufs; sc->hc_rx_ticks = bxe_hc_rx_ticks; sc->hc_tx_ticks = bxe_hc_tx_ticks; sc->max_aggregation_size = bxe_max_aggregation_size; sc->mrrs = bxe_mrrs; sc->autogreeen = bxe_autogreeen; sc->udp_rss = bxe_udp_rss; if (bxe_interrupt_mode == INTR_MODE_INTX) { sc->num_queues = 1; } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ sc->num_queues = min((bxe_queue_count ? 
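/* queue_count == 0 means auto-size: one queue per CPU, capped at MAX_RSS_CHAINS and clamped to mp_ncpus just below */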
bxe_queue_count : mp_ncpus), MAX_RSS_CHAINS); if (sc->num_queues > mp_ncpus) { sc->num_queues = mp_ncpus; } } BLOGD(sc, DBG_LOAD, "User Config: " "debug=0x%lx " "interrupt_mode=%d " "queue_count=%d " "hc_rx_ticks=%d " "hc_tx_ticks=%d " "rx_budget=%d " "max_aggregation_size=%d " "mrrs=%d " "autogreeen=%d " "udp_rss=%d\n", bxe_debug, sc->interrupt_mode, sc->num_queues, sc->hc_rx_ticks, sc->hc_tx_ticks, bxe_rx_budget, sc->max_aggregation_size, sc->mrrs, sc->autogreeen, sc->udp_rss); } static int bxe_media_detect(struct bxe_softc *sc) { int port_type; uint32_t phy_idx = bxe_get_cur_phy_idx(sc); switch (sc->link_params.phy[phy_idx].media_type) { case ELINK_ETH_PHY_SFPP_10G_FIBER: case ELINK_ETH_PHY_XFP_FIBER: BLOGI(sc, "Found 10Gb Fiber media.\n"); sc->media = IFM_10G_SR; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_SFP_1G_FIBER: BLOGI(sc, "Found 1Gb Fiber media.\n"); sc->media = IFM_1000_SX; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_KR: case ELINK_ETH_PHY_CX4: BLOGI(sc, "Found 10GBase-CX4 media.\n"); sc->media = IFM_10G_CX4; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_DA_TWINAX: BLOGI(sc, "Found 10Gb Twinax media.\n"); sc->media = IFM_10G_TWINAX; port_type = PORT_DA; break; case ELINK_ETH_PHY_BASE_T: if (sc->link_params.speed_cap_mask[0] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { BLOGI(sc, "Found 10GBase-T media.\n"); sc->media = IFM_10G_T; port_type = PORT_TP; } else { BLOGI(sc, "Found 1000Base-T media.\n"); sc->media = IFM_1000_T; port_type = PORT_TP; } break; case ELINK_ETH_PHY_NOT_PRESENT: BLOGI(sc, "Media not present.\n"); sc->media = 0; port_type = PORT_OTHER; break; case ELINK_ETH_PHY_UNSPECIFIED: default: BLOGI(sc, "Unknown media!\n"); sc->media = 0; port_type = PORT_OTHER; break; } return port_type; } #define GET_FIELD(value, fname) \ (((value) & (fname##_MASK)) >> (fname##_SHIFT)) #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) static int bxe_get_igu_cam_info(struct bxe_softc *sc) { int pfid = SC_FUNC(sc); int igu_sb_id; uint32_t val; uint8_t fid, igu_sb_cnt = 0; sc->igu_base_sb = 0xff; if (CHIP_INT_MODE_IS_BC(sc)) { int vn = SC_VN(sc); igu_sb_cnt = sc->igu_sb_cnt; sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * FP_SB_MAX_E1x); sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn)); return (0); } /* IGU in normal mode - read CAM */ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { continue; } fid = IGU_FID(val); if ((fid & IGU_FID_ENCODE_IS_PF)) { if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { continue; } if (IGU_VEC(val) == 0) { /* default status block */ sc->igu_dsb_id = igu_sb_id; } else { if (sc->igu_base_sb == 0xff) { sc->igu_base_sb = igu_sb_id; } igu_sb_cnt++; } } } /* * Due to new PF resource allocation by MFW T7.4 and above, it's optional * that number of CAM entries will not be equal to the value advertised in * PCI. Driver should use the minimal value of both as the actual status * block count */ sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); if (igu_sb_cnt == 0) { BLOGE(sc, "CAM configuration error\n"); return (-1); } return (0); } /* * Gather various information from the device config space, the device itself, * shmem, and the user input. 
*/ static int bxe_get_device_info(struct bxe_softc *sc) { uint32_t val; int rc; /* Get the data for the device */ sc->devinfo.vendor_id = pci_get_vendor(sc->dev); sc->devinfo.device_id = pci_get_device(sc->dev); sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); /* get the chip revision (chip metal comes from pci config space) */ sc->devinfo.chip_id = sc->link_params.chip_id = (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); /* force 57811 according to MISC register */ if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { if (CHIP_IS_57810(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } else if (CHIP_IS_57810_MF(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } sc->devinfo.chip_id |= 0x1; } BLOGD(sc, DBG_LOAD, "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", sc->devinfo.chip_id, ((sc->devinfo.chip_id >> 16) & 0xffff), ((sc->devinfo.chip_id >> 12) & 0xf), ((sc->devinfo.chip_id >> 4) & 0xff), ((sc->devinfo.chip_id >> 0) & 0xf)); val = (REG_RD(sc, 0x2874) & 0x55); if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1(sc) && val) || (CHIP_IS_E1H(sc) && (val == 0x55))) { sc->flags |= BXE_ONE_PORT_FLAG; BLOGD(sc, DBG_LOAD, "single port device\n"); } /* set the doorbell size */ sc->doorbell_size = (1 << BXE_DB_SHIFT); /* determine whether the device is in 2 port or 4 port mode */ sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ if (CHIP_IS_E2E3(sc)) { /* * Read port4mode_en_ovwr[0]: * If 1, four port mode is in port4mode_en_ovwr[1]. * If 0, four port mode is in port4mode_en[0]. */ val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); if (val & 1) { val = ((val >> 1) & 1); } else { val = REG_RD(sc, MISC_REG_PORT4MODE_EN); } sc->devinfo.chip_port_mode = (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); } /* get the function and path info for the device */ bxe_get_function_num(sc); /* get the shared memory base address */ sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); sc->devinfo.shmem2_base = REG_RD(sc, (SC_PATH(sc) ? 
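/* shmem2 base is kept in a per-path scratch register: GENERIC_CR_1 for path 1, GENERIC_CR_0 for path 0 */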
MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", sc->devinfo.shmem_base, sc->devinfo.shmem2_base); if (!sc->devinfo.shmem_base) { /* this should ONLY prevent upcoming shmem reads */ BLOGI(sc, "MCP not active\n"); sc->flags |= BXE_NO_MCP_FLAG; return (0); } /* make sure the shared memory contents are valid */ val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); return (0); } BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); /* get the bootcode version */ sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); snprintf(sc->devinfo.bc_ver_str, sizeof(sc->devinfo.bc_ver_str), "%d.%d.%d", ((sc->devinfo.bc_ver >> 24) & 0xff), ((sc->devinfo.bc_ver >> 16) & 0xff), ((sc->devinfo.bc_ver >> 8) & 0xff)); BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); /* get the bootcode shmem address */ sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); /* clean indirect addresses as they're not used */ pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); if (IS_PF(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); if (CHIP_IS_E1x(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); } /* * Enable internal target-read (in case we are probed after PF * FLR). Must be done prior to any BAR read access. Only for * 57712 and up */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); } } /* get the nvram size */ val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); sc->devinfo.flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); /* get PCI capabilities */ bxe_probe_pci_caps(sc); bxe_set_power_state(sc, PCI_PM_D0); /* get various configuration parameters from shmem */ bxe_get_shmem_info(sc); if (sc->devinfo.pcie_msix_cap_reg != 0) { val = pci_read_config(sc->dev, (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), 2); sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); } else { sc->igu_sb_cnt = 1; } sc->igu_base_addr = BAR_IGU_INTMEM; /* initialize IGU parameters */ if (CHIP_IS_E1x(sc)) { sc->devinfo.int_block = INT_BLOCK_HC; sc->igu_dsb_id = DEF_SB_IGU_ID; sc->igu_base_sb = 0; } else { sc->devinfo.int_block = INT_BLOCK_IGU; /* do not allow device reset during IGU info processing */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { int tout = 5000; BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { tout--; DELAY(1000); } if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); return (-1); } } if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; } else {
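/* normal (non-BC) mode: the IGU-to-function mapping is not fixed, so bxe_get_igu_cam_info() below scans the IGU CAM to discover it */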
BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); } rc = bxe_get_igu_cam_info(sc); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); if (rc) { return (rc); } } /* * Get base FW non-default (fast path) status block ID. This value is * used to initialize the fw_sb_id saved on the fp/queue structure to * determine the id used by the FW. */ if (CHIP_IS_E1x(sc)) { sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); } else { /* * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of * the same queue are indicated on the same IGU SB). So we prefer * FW and IGU SBs to be the same value. */ sc->base_fw_ndsb = sc->igu_base_sb; } BLOGD(sc, DBG_LOAD, "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", sc->igu_dsb_id, sc->igu_base_sb, sc->igu_sb_cnt, sc->base_fw_ndsb); elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_settings_supported(struct bxe_softc *sc, uint32_t switch_cfg) { uint32_t cfg_size = 0; uint32_t idx; uint8_t port = SC_PORT(sc); /* aggregation of supported attributes of all external phys */ sc->port.supported[0] = 0; sc->port.supported[1] = 0; switch (sc->link_params.num_phys) { case 1: sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; cfg_size = 1; break; case 2: sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; cfg_size = 1; break; case 3: if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } else { sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } cfg_size = 2; break; } if (!(sc->port.supported[0] || sc->port.supported[1])) { BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config2)); return; } if (CHIP_IS_E3(sc)) sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); else { switch (switch_cfg) { case ELINK_SWITCH_CFG_1G: sc->port.phy_addr = REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); break; case ELINK_SWITCH_CFG_10G: sc->port.phy_addr = REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); break; default: BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", sc->port.link_config[0]); return; } } BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); /* mask what we support according to speed_cap_mask per configuration */ for (idx = 0; idx < cfg_size; idx++) { if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full; } if (!(sc->link_params.speed_cap_mask[idx] & 
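/* same pattern for each rate: a speed absent from the NVRAM capability mask is stripped from the ELINK_SUPPORTED_* set */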
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; } } BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", sc->port.supported[0], sc->port.supported[1]); } static void bxe_link_settings_requested(struct bxe_softc *sc) { uint32_t link_config; uint32_t idx; uint32_t cfg_size = 0; sc->port.advertising[0] = 0; sc->port.advertising[1] = 0; switch (sc->link_params.num_phys) { case 1: case 2: cfg_size = 1; break; case 3: cfg_size = 2; break; } for (idx = 0; idx < cfg_size; idx++) { sc->link_params.req_duplex[idx] = DUPLEX_FULL; link_config = sc->port.link_config[idx]; switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_AUTO: if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] |= sc->port.supported[idx]; if (sc->link_params.phy[ELINK_EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) sc->port.advertising[idx] |= (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full); } else { /* force 10G, no AN */ sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); continue; } break; case PORT_FEATURE_LINK_SPEED_10M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_1G: if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_2_5G: if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 
sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10G_CX4: if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_20G: sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; break; default: BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] = sc->port.supported[idx]; break; } sc->link_params.req_flow_ctrl[idx] = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; } else { bxe_set_requested_fc(sc); } } BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " "req_flow_ctrl=0x%x advertising=0x%x\n", sc->link_params.req_line_speed[idx], sc->link_params.req_duplex[idx], sc->link_params.req_flow_ctrl[idx], sc->port.advertising[idx]); } } static void bxe_get_phy_info(struct bxe_softc *sc) { uint8_t port = SC_PORT(sc); uint32_t config = sc->port.config; uint32_t eee_mode; /* shmem data already read in bxe_get_shmem_info() */ BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " "link_config0=0x%08x\n", sc->link_params.lane_config, sc->link_params.speed_cap_mask[0], sc->port.link_config[0]); bxe_link_settings_supported(sc, sc->link_params.switch_cfg); bxe_link_settings_requested(sc); if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } /* configure link feature according to nvram value */ eee_mode = (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | ELINK_EEE_MODE_ENABLE_LPI | ELINK_EEE_MODE_OUTPUT_TIME); } else { sc->link_params.eee_mode = 0; } /* get the media type */ bxe_media_detect(sc); } static void bxe_get_params(struct bxe_softc *sc) { /* get user tunable params */ bxe_get_tunable_params(sc); /* select the RX and TX ring sizes */ sc->tx_ring_size = TX_BD_USABLE; sc->rx_ring_size = RX_BD_USABLE; /* XXX disable WoL */ sc->wol = 0; } static void bxe_set_modes_bitmap(struct bxe_softc *sc) { uint32_t flags = 0; if (CHIP_REV_IS_FPGA(sc)) { SET_FLAGS(flags, MODE_FPGA); } else if (CHIP_REV_IS_EMUL(sc)) { SET_FLAGS(flags, MODE_EMUL); } else { SET_FLAGS(flags, MODE_ASIC); } if (CHIP_IS_MODE_4_PORT(sc)) { SET_FLAGS(flags, MODE_PORT4); } else { SET_FLAGS(flags, MODE_PORT2); } if (CHIP_IS_E2(sc)) { SET_FLAGS(flags, MODE_E2); 
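/* E2 and E3 (and E3 A0 vs B0) get distinct mode flags since the common init code applies different settings for each */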
} else if (CHIP_IS_E3(sc)) { SET_FLAGS(flags, MODE_E3); if (CHIP_REV(sc) == CHIP_REV_Ax) { SET_FLAGS(flags, MODE_E3_A0); } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); } } if (IS_MF(sc)) { SET_FLAGS(flags, MODE_MF); switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: SET_FLAGS(flags, MODE_MF_SD); break; case MULTI_FUNCTION_SI: SET_FLAGS(flags, MODE_MF_SI); break; case MULTI_FUNCTION_AFEX: SET_FLAGS(flags, MODE_MF_AFEX); break; } } else { SET_FLAGS(flags, MODE_SF); } #if defined(__LITTLE_ENDIAN) SET_FLAGS(flags, MODE_LITTLE_ENDIAN); #else /* __BIG_ENDIAN */ SET_FLAGS(flags, MODE_BIG_ENDIAN); #endif INIT_MODE_FLAGS(sc) = flags; } static int bxe_alloc_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; bus_addr_t busaddr; int max_agg_queues; int max_segments; bus_size_t max_size; bus_size_t max_seg_size; char buf[32]; int rc; int i, j; /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ /* allocate the parent bus DMA tag */ rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BUS_SPACE_MAXSIZE_32BIT, /* max map size */ BUS_SPACE_UNRESTRICTED, /* num discontinuous */ BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &sc->parent_dma_tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); return (1); } /************************/ /* DEFAULT STATUS BLOCK */ /************************/ if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), &sc->def_sb_dma, "default status block") != 0) { /* XXX */ bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; /***************/ /* EVENT QUEUE */ /***************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->eq_dma, "event queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; /*************/ /* SLOW PATH */ /*************/ if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), &sc->sp_dma, "slow path") != 0) { /* XXX */ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; /*******************/ /* SLOW PATH QUEUE */ /*******************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->spq_dma, "slow path queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, "fw decompression buffer") != 0) { /* XXX */ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; if ((sc->gz_strm = malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { /* XXX */ bxe_dma_free(sc, &sc->gz_buf_dma); 
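/* error unwind: release everything allocated so far, newest first */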
sc->gz_buf = NULL; bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } /*************/ /* FASTPATHS */ /*************/ /* allocate DMA memory for each fastpath structure */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->sc = sc; fp->index = i; /*******************/ /* FP STATUS BLOCK */ /*******************/ snprintf(buf, sizeof(buf), "fp %d status block", i); if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), &fp->sb_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { if (CHIP_IS_E2E3(sc)) { fp->status_block.e2_sb = (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; } else { fp->status_block.e1x_sb = (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; } } /******************/ /* FP TX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), &fp->tx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; } /* link together the tx bd chain pages */ for (j = 1; j <= TX_BD_NUM_PAGES; j++) { /* index into the tx bd chain array to last entry per page */ struct eth_tx_next_bd *tx_next_bd = &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; /* point to the next page and wrap from last page */ busaddr = (fp->tx_dma.paddr + (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); } /******************/ /* FP RX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), &fp->rx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; } /* link together the rx bd chain pages */ for (j = 1; j <= RX_BD_NUM_PAGES; j++) { /* index into the rx bd chain array to last entry per page */ struct eth_rx_bd *rx_bd = &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_dma.paddr + (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); rx_bd->addr_hi = htole32(U64_HI(busaddr)); rx_bd->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX RCQ CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp %d rcq chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), &fp->rcq_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; } /* link together the rcq chain pages */ for (j = 1; j <= RCQ_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct eth_rx_cqe_next_page *rx_cqe_next = (struct eth_rx_cqe_next_page *) &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; /* point to the next page and wrap from last page */ busaddr = (fp->rcq_dma.paddr + (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX SGE CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp 
%d sge chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), &fp->rx_sge_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; } /* link together the sge chain pages */ for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct eth_rx_sge *rx_sge = &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_sge_dma.paddr + (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); rx_sge->addr_hi = htole32(U64_HI(busaddr)); rx_sge->addr_lo = htole32(U64_LO(busaddr)); } /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ /* set required sizes before mapping to conserve resources */ if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { max_size = BXE_TSO_MAX_SIZE; max_segments = BXE_TSO_MAX_SEGMENTS; max_seg_size = BXE_TSO_MAX_SEG_SIZE; } else { max_size = (MCLBYTES * BXE_MAX_SEGMENTS); max_segments = BXE_MAX_SEGMENTS; max_seg_size = MCLBYTES; } /* create a dma tag for the tx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ max_size, /* max map size */ max_segments, /* num discontinuous */ max_seg_size, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->tx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d tx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the tx mbuf clusters */ for (j = 0; j < TX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->tx_mbuf_tag, BUS_DMA_NOWAIT, &fp->tx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d tx mbuf %d' (%d)\n", i, j, rc); return (1); } } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ /* create a dma tag for the rx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ MJUM9BYTES, /* max map size */ 1, /* num discontinuous */ MJUM9BYTES, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the rx mbuf clusters */ for (j = 0; j < RX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ /* create a dma tag for 
the rx sge mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BCM_PAGE_SIZE, /* max map size */ 1, /* num discontinuous */ BCM_PAGE_SIZE, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_sge_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx sge mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for the rx sge mbuf clusters */ for (j = 0; j < RX_SGE_TOTAL; j++) { if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx sge mbuf cluster */ if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx sge mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ /* create dma maps for the rx tpa mbuf clusters */ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info[j].bd.m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx tpa mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); return (1); } bxe_init_sge_ring_bit_mask(fp); } return (0); } static void bxe_free_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; int max_agg_queues; int i, j; if (sc->parent_dma_tag == NULL) { return; /* assume nothing was allocated */ } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; /*******************/ /* FP STATUS BLOCK */ /*******************/ bxe_dma_free(sc, &fp->sb_dma); memset(&fp->status_block, 0, sizeof(fp->status_block)); /******************/ /* FP TX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->tx_dma); fp->tx_chain = NULL; /******************/ /* FP RX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->rx_dma); fp->rx_chain = NULL; /*******************/ /* FP RX RCQ CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rcq_dma); fp->rcq_chain = NULL; /*******************/ /* FP RX SGE CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rx_sge_dma); fp->rx_sge_chain = NULL; /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ if (fp->tx_mbuf_tag != NULL) { for (j = 0; j < TX_BD_TOTAL; j++) { if (fp->tx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); } } bus_dma_tag_destroy(fp->tx_mbuf_tag); fp->tx_mbuf_tag = NULL; } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ if (fp->rx_mbuf_tag != NULL) { for (j = 0; j < RX_BD_TOTAL; j++) { if (fp->rx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, 
fp->rx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_chain[j].m_map); } } if (fp->rx_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (fp->rx_tpa_info[j].bd.m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); } } if (fp->rx_tpa_info_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_mbuf_tag); fp->rx_mbuf_tag = NULL; } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ if (fp->rx_sge_mbuf_tag != NULL) { for (j = 0; j < RX_SGE_TOTAL; j++) { if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); } } if (fp->rx_sge_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); fp->rx_sge_mbuf_tag = NULL; } } /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ bxe_dma_free(sc, &sc->gz_buf_dma); sc->gz_buf = NULL; free(sc->gz_strm, M_DEVBUF); sc->gz_strm = NULL; /*******************/ /* SLOW PATH QUEUE */ /*******************/ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; /*************/ /* SLOW PATH */ /*************/ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; /***************/ /* EVENT QUEUE */ /***************/ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; /************************/ /* DEFAULT STATUS BLOCK */ /************************/ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); sc->parent_dma_tag = NULL; } /* * Previous driver DMAE transaction may have occurred when pre-boot stage * ended and boot began. This would invalidate the addresses of the * transaction, resulting in was-error bit set in the PCI causing all * hw-to-host PCIe transactions to timeout. 
If this happened we want to clear * the interrupt which detected this from the pglueb and the was-done bit */ static void bxe_prev_interrupted_dmae(struct bxe_softc *sc) { uint32_t val; if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { BLOGD(sc, DBG_LOAD, "Clearing 'was-error' bit that was set in pglueb"); REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); } } } static int bxe_prev_mcp_done(struct bxe_softc *sc) { uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BLOGE(sc, "MCP response failure, aborting\n"); return (-1); } return (0); } static struct bxe_prev_list_node * bxe_prev_path_get_entry(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; LIST_FOREACH(tmp, &bxe_prev_list, node) { if ((sc->pcie_bus == tmp->bus) && (sc->pcie_device == tmp->slot) && (SC_PATH(sc) == tmp->path)) { return (tmp); } } return (NULL); } static uint8_t bxe_prev_is_path_marked(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; int rc = FALSE; mtx_lock(&bxe_prev_mtx); tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (tmp->aer) { BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was marked by AER\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { rc = TRUE; BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was already cleaned from previous drivers\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } } mtx_unlock(&bxe_prev_mtx); return (rc); } static int bxe_prev_mark_path(struct bxe_softc *sc, uint8_t after_undi) { struct bxe_prev_list_node *tmp; mtx_lock(&bxe_prev_mtx); /* Check whether the entry for this path already exists */ tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (!tmp->aer) { BLOGD(sc, DBG_LOAD, "Re-marking AER in path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { BLOGD(sc, DBG_LOAD, "Removing AER indication from path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); tmp->aer = 0; } mtx_unlock(&bxe_prev_mtx); return (0); } mtx_unlock(&bxe_prev_mtx); /* Create an entry for this path and add it */ tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, (M_NOWAIT | M_ZERO)); if (!tmp) { BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); return (-1); } tmp->bus = sc->pcie_bus; tmp->slot = sc->pcie_device; tmp->path = SC_PATH(sc); tmp->aer = 0; tmp->undi = after_undi ? 
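/* record which port still had UNDI state so a later load on this path can compensate for it */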
(1 << SC_PORT(sc)) : 0; mtx_lock(&bxe_prev_mtx); BLOGD(sc, DBG_LOAD, "Marked path %d/%d/%d - finished previous unload\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); mtx_unlock(&bxe_prev_mtx); return (0); } static int bxe_do_flr(struct bxe_softc *sc) { int i; /* only E2 and onwards support FLR */ if (CHIP_IS_E1x(sc)) { BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); return (-1); } /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", sc->devinfo.bc_ver); return (-1); } /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) { DELAY(((1 << (i - 1)) * 100) * 1000); } if (!bxe_is_pcie_pending(sc)) { goto clear; } } BLOGE(sc, "PCIE transaction is not cleared, " "proceeding with reset anyway\n"); clear: BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); return (0); } struct bxe_mac_vals { uint32_t xmac_addr; uint32_t xmac_val; uint32_t emac_addr; uint32_t emac_val; uint32_t umac_addr; uint32_t umac_val; uint32_t bmac_addr; uint32_t bmac_val[2]; }; static void bxe_prev_unload_close_mac(struct bxe_softc *sc, struct bxe_mac_vals *vals) { uint32_t val, base_addr, offset, mask, reset_reg; uint8_t mac_stopped = FALSE; uint8_t port = SC_PORT(sc); uint32_t wb_data[2]; /* reset addresses as they also mark which values were changed */ vals->bmac_addr = 0; vals->umac_addr = 0; vals->xmac_addr = 0; vals->emac_addr = 0; reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); if (!CHIP_IS_E3(sc)) { val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; if ((mask & reset_reg) && val) { BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL : BIGMAC_REGISTER_BMAC_CONTROL; /* * use rd/wr since we cannot use dmae. This is safe * since MCP won't access the bus due to the request * to unload, and no function on the path can be * loaded at this time. */ wb_data[0] = REG_RD(sc, base_addr + offset); wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); vals->bmac_addr = base_addr + offset; vals->bmac_val[0] = wb_data[0]; vals->bmac_val[1] = wb_data[1]; wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; REG_WR(sc, vals->bmac_addr, wb_data[0]); REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); } BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; vals->emac_val = REG_RD(sc, vals->emac_addr); REG_WR(sc, vals->emac_addr, 0); mac_stopped = TRUE; } else { if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); vals->xmac_addr = base_addr + XMAC_REG_CTRL; vals->xmac_val = REG_RD(sc, vals->xmac_addr); REG_WR(sc, vals->xmac_addr, 0); mac_stopped = TRUE; } mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; if (mask & reset_reg) { BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); base_addr = SC_PORT(sc) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; vals->umac_val = REG_RD(sc, vals->umac_addr); REG_WR(sc, vals->umac_addr, 0); mac_stopped = TRUE; } } if (mac_stopped) { DELAY(20000); } } #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) static void bxe_prev_unload_undi_inc(struct bxe_softc *sc, uint8_t port, uint8_t inc) { uint16_t rcq, bd; uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); BLOGD(sc, DBG_LOAD, "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", port, bd, rcq); } static int bxe_prev_unload_common(struct bxe_softc *sc) { uint32_t reset_reg, tmp_reg = 0, rc; uint8_t prev_undi = FALSE; struct bxe_mac_vals mac_vals; uint32_t timer_count = 1000; uint32_t prev_brb; /* * It is possible a previous function received 'common' answer, * but hasn't loaded yet, therefore creating a scenario of * multiple functions receiving 'common' on the same path. */ BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); memset(&mac_vals, 0, sizeof(mac_vals)); if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { /* Close the MAC Rx to prevent BRB from filling up */ bxe_prev_unload_close_mac(sc, &mac_vals); /* close LLH filters towards the BRB */ elink_set_rx_filter(&sc->link_params, 0); /* * Check if the UNDI driver was previously loaded. 
* UNDI driver initializes CID offset for normal bell to 0x7 */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); if (tmp_reg == 0x7) { BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); prev_undi = TRUE; /* clear the UNDI indication */ REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); /* clear possible idle check errors */ REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); } } /* wait until BRB is empty */ tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { prev_brb = tmp_reg; tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); if (!tmp_reg) { break; } BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); /* reset timer as long as BRB actually gets emptied */ if (prev_brb > tmp_reg) { timer_count = 1000; } else { timer_count--; } /* If UNDI resides in memory, manually increment it */ if (prev_undi) { bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); } DELAY(10); } if (!timer_count) { BLOGE(sc, "Failed to empty BRB\n"); } } /* No packets are in the pipeline, path is ready for reset */ bxe_reset_common(sc); if (mac_vals.xmac_addr) { REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); } if (mac_vals.umac_addr) { REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); } if (mac_vals.emac_addr) { REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); } if (mac_vals.bmac_addr) { REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); } rc = bxe_prev_mark_path(sc, prev_undi); if (rc) { bxe_prev_mcp_done(sc); return (rc); } return (bxe_prev_mcp_done(sc)); } static int bxe_prev_unload_uncommon(struct bxe_softc *sc) { int rc; BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); /* Test if previous unload process was already finished for this path */ if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); /* * If function has FLR capabilities, and existing FW version matches * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); if (!rc) { /* fw version is good */ BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); rc = bxe_do_flr(sc); } if (!rc) { /* FLR was performed */ BLOGD(sc, DBG_LOAD, "FLR successful\n"); return (0); } BLOGD(sc, DBG_LOAD, "Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bxe_prev_mcp_done(sc); if (!rc) { rc = BXE_PREV_WAIT_NEEDED; } return (rc); } static int bxe_prev_unload(struct bxe_softc *sc) { int time_counter = 10; uint32_t fw, hw_lock_reg, hw_lock_val; uint32_t rc = 0; /* * Clear HW from errors which may have resulted from an interrupted * DMAE transaction. */ bxe_prev_interrupted_dmae(sc); /* Release previously held locks */ hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
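/* HW lock registers come in two banks: funcs 0-5 at DRIVER_CONTROL_1 (8 bytes apart), funcs 6-7 at DRIVER_CONTROL_7 */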
(MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); hw_lock_val = (REG_RD(sc, hw_lock_reg)); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); } BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); REG_WR(sc, hw_lock_reg, 0xffffffff); } else { BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); } if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); } do { /* Lock MCP using an unload request */ fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); if (!fw) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; break; } if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { rc = bxe_prev_unload_common(sc); break; } /* non-common reply from MCP might require looping */ rc = bxe_prev_unload_uncommon(sc); if (rc != BXE_PREV_WAIT_NEEDED) { break; } DELAY(20000); } while (--time_counter); if (!time_counter || rc) { BLOGE(sc, "Failed to unload previous driver!" " time_counter %d rc %d\n", time_counter, rc); rc = -1; } return (rc); } void bxe_dcbx_set_state(struct bxe_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) { if (!CHIP_IS_E1x(sc)) { sc->dcb_state = dcb_on; sc->dcbx_enabled = dcbx_enabled; } else { sc->dcb_state = FALSE; sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; } BLOGD(sc, DBG_LOAD, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? "on-chip with negotiation" : "invalid"); } /* must be called after sriov-enable */ static int bxe_set_qm_cid_count(struct bxe_softc *sc) { int cid_count = BXE_L2_MAX_CID(sc); if (IS_SRIOV(sc)) { cid_count += BXE_VF_CIDS; } if (CNIC_SUPPORT(sc)) { cid_count += CNIC_CID_MAX; } return (roundup(cid_count, QM_CID_ROUND)); } static void bxe_init_multi_cos(struct bxe_softc *sc) { int pri, cos; uint32_t pri_map = 0; /* XXX change to user config */ for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); if (cos < sc->max_cos) { sc->prio_to_cos[pri] = cos; } else { BLOGW(sc, "Invalid COS %d for priority %d " "(max COS is %d), setting to 0\n", cos, pri, (sc->max_cos - 1)); sc->prio_to_cos[pri] = 0; } } } static int bxe_sysctl_state(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc; int error, result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) { return (error); } if (result == 1) { uint32_t temp; sc = (struct bxe_softc *)arg1; BLOGI(sc, "... dumping driver state ...\n"); temp = SHMEM2_RD(sc, temperature_in_half_celsius); BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); } return (error); } static int +bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS) +{ + struct bxe_softc *sc; + int error, result; + + result = 0; + error = sysctl_handle_int(oidp, &result, 0, req); + + if (error || !req->newptr) { + return (error); + } + + if (result == 1) { + sc = (struct bxe_softc *)arg1; + + BLOGI(sc, "... grcdump start ...\n"); + bxe_grc_dump(sc); + BLOGI(sc, "...
grcdump done ...\n"); + } + + return (error); +} + +static int bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; uint32_t *offset; uint64_t value = 0; int index = (int)arg2; if (index >= BXE_NUM_ETH_STATS) { BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_stats_arr[index].offset); switch (bxe_eth_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", index, bxe_eth_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static int bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats; uint32_t *offset; uint64_t value = 0; uint32_t q_stat = (uint32_t)arg2; uint32_t fp_index = ((q_stat >> 16) & 0xffff); uint32_t index = (q_stat & 0xffff); eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; if (index >= BXE_NUM_ETH_Q_STATS) { BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); switch (bxe_eth_q_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", index, bxe_eth_q_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static void bxe_add_sysctls(struct bxe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *queue_top, *queue; struct sysctl_oid_list *queue_top_children, *queue_children; char queue_num_buf[32]; uint32_t q_stat; int i, j; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", CTLFLAG_RD, BXE_DRIVER_VERSION, 0, "version"); snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : "Unknown")); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, "multifunction vnics per port"); snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : "???GT/s"), sc->devinfo.pcie_link_width); sc->debug = bxe_debug; #if __FreeBSD_version >= 900000 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, "bootcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, sc->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, sc->mac_addr_str, 0, "mac address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, sc->pci_link_str, 0, "pci link status"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, "debug logging mode"); #else SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0, "bootcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, &sc->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, &sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, &sc->mac_addr_str, 0, "mac address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, &sc->pci_link_str, 0, "pci link status"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug logging mode"); #endif /* #if __FreeBSD_version >= 900000 */ - sc->trigger_grcdump = 0; - SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", - CTLFLAG_RW, &sc->trigger_grcdump, 0, - "trigger grcdump should be invoked" - " before collecting grcdump"); + SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump", + CTLTYPE_UINT | CTLFLAG_RW, sc, 0, + bxe_sysctl_trigger_grcdump, "IU", + "set by driver when a grcdump is needed"); - sc->grcdump_started = 0; sc->grcdump_done = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", - CTLFLAG_RD, &sc->grcdump_done, 0, + CTLFLAG_RW, &sc->grcdump_done, 0, "set by driver when grcdump is done"); sc->rx_budget = bxe_rx_budget; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", CTLFLAG_RW, &sc->rx_budget, 0, "rx processing budget"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, bxe_sysctl_state, "IU", "dump driver state"); for (i = 0; i < BXE_NUM_ETH_STATS; i++) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, bxe_eth_stats_arr[i].string, CTLTYPE_U64 | CTLFLAG_RD, sc, i, bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string); } /* add a new parent node for all queues "dev.bxe.#.queue" */ queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", CTLFLAG_RD, NULL, "queue"); queue_top_children = SYSCTL_CHILDREN(queue_top); for (i = 0; i < sc->num_queues; i++) { /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, queue_num_buf, CTLFLAG_RD, NULL, "single queue"); queue_children = SYSCTL_CHILDREN(queue); for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { q_stat = ((i << 16) | j); SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, bxe_eth_q_stats_arr[j].string, CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string); } } } static int bxe_alloc_buf_rings(struct bxe_softc *sc) { #if __FreeBSD_version >= 800000 int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if 
(fp->tx_br == NULL) return (-1); } #endif return (0); } static void bxe_free_buf_rings(struct bxe_softc *sc) { #if __FreeBSD_version >= 800000 int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tx_br) { buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } } #endif } static void bxe_init_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "bxe%d_fp%d_tx_lock", sc->unit, i); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), "bxe%d_fp%d_rx_lock", sc->unit, i); mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); } } static void bxe_destroy_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (mtx_initialized(&fp->tx_mtx)) { mtx_destroy(&fp->tx_mtx); } if (mtx_initialized(&fp->rx_mtx)) { mtx_destroy(&fp->rx_mtx); } } } /* * Device attach function. * * Allocates device resources, performs secondary chip identification, and * initializes driver instance variables. This function is called from driver * load after a successful probe. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_attach(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting attach...\n"); sc->state = BXE_STATE_CLOSED; sc->dev = dev; sc->unit = device_get_unit(dev); BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); sc->pcie_bus = pci_get_bus(dev); sc->pcie_device = pci_get_slot(dev); sc->pcie_func = pci_get_function(dev); /* enable bus master capability */ pci_enable_busmaster(dev); /* get the BARs */ if (bxe_allocate_bars(sc) != 0) { return (ENXIO); } /* initialize the mutexes */ bxe_init_mutexes(sc); /* prepare the periodic callout */ callout_init(&sc->periodic_callout, 0); /* prepare the chip taskqueue */ sc->chip_tq_flags = CHIP_TQ_NONE; snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), "bxe%d_chip_tq", sc->unit); TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->chip_tq); taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ "%s", sc->chip_tq_name); /* get device info and set params */ if (bxe_get_device_info(sc) != 0) { BLOGE(sc, "getting device info\n"); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* get final misc params */ bxe_get_params(sc); /* set the default MTU (changed via ifconfig) */ sc->mtu = ETHERMTU; bxe_set_modes_bitmap(sc); /* XXX * If in AFEX mode and the function is configured for FCoE * then bail... no L2 allowed. 
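* (No such guard exists here yet; a minimal illustrative sketch of what
* it might look like -- mf_cfg_func_is_fcoe_only() is a hypothetical
* helper, only IS_MF_AFEX() and the error/return style are taken from
* this driver:
*
*     if (IS_MF_AFEX(sc) && mf_cfg_func_is_fcoe_only(sc)) {
*         BLOGE(sc, "AFEX FCoE-only function, no L2 support\n");
*         return (ENXIO);
*     }
* )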
*/ /* get phy settings from shmem and 'and' against admin settings */ bxe_get_phy_info(sc); /* initialize the FreeBSD ifnet interface */ if (bxe_init_ifnet(sc) != 0) { bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } if (bxe_add_cdev(sc) != 0) { if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate device interrupts */ if (bxe_interrupt_alloc(sc) != 0) { bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } bxe_init_fp_mutexs(sc); if (bxe_alloc_buf_rings(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate ilt */ if (bxe_alloc_ilt_mem(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate the host hardware/software hsi structures */ if (bxe_alloc_hsi_mem(sc) != 0) { bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* need to reset chip if UNDI was active */ if (IS_PF(sc) && !BXE_NOMCP(sc)) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); bxe_prev_unload(sc); } #if 1 /* XXX */ bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); #else if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && SHMEM2_RD(sc, dcbx_lldp_params_offset) && SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); bxe_dcbx_init_params(sc); } else { bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); } #endif /* calculate qm_cid_count */ sc->qm_cid_count = bxe_set_qm_cid_count(sc); BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); sc->max_cos = 1; bxe_init_multi_cos(sc); bxe_add_sysctls(sc); return (0); } /* * Device detach function. * * Stops the controller, resets the controller, and releases resources. 
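* Teardown proceeds in roughly the reverse order of allocation in
* bxe_attach(). A condensed sketch of the sequence implemented below
* (every call shown appears in this function):
*
*     bxe_del_cdev(sc);                        remove the control device
*     bxe_periodic_stop(sc);                   stop the periodic callout
*     taskqueue_drain()/taskqueue_free()       stop the chip taskqueue
*     bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);  if the device was open
*     ether_ifdetach(ifp); ifmedia_removeall(&sc->ifmedia);
*     bxe_free_hsi_mem(sc); bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc);
*     bxe_interrupt_free(sc);
*     bxe_destroy_fp_mutexs(sc); bxe_release_mutexes(sc);
*     bxe_deallocate_bars(sc); if_free(sc->ifp); pci_disable_busmaster(dev);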
* * Returns: * 0 = Success, >0 = Failure */ static int bxe_detach(device_t dev) { struct bxe_softc *sc; if_t ifp; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting detach...\n"); ifp = sc->ifp; if (ifp != NULL && if_vlantrunkinuse(ifp)) { BLOGE(sc, "Cannot detach while VLANs are in use.\n"); return(EBUSY); } bxe_del_cdev(sc); /* stop the periodic callout */ bxe_periodic_stop(sc); /* stop the chip taskqueue */ atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); if (sc->chip_tq) { taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); taskqueue_free(sc->chip_tq); sc->chip_tq = NULL; } /* stop and reset the controller if it was open */ if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); sc->state = BXE_STATE_DISABLED; BXE_CORE_UNLOCK(sc); } /* release the network interface */ if (ifp != NULL) { ether_ifdetach(ifp); } ifmedia_removeall(&sc->ifmedia); /* XXX do the following based on driver state... */ /* free the host hardware/software hsi structures */ bxe_free_hsi_mem(sc); /* free ilt */ bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); /* release the interrupts */ bxe_interrupt_free(sc); /* Release the mutexes*/ bxe_destroy_fp_mutexs(sc); bxe_release_mutexes(sc); /* Release the PCIe BAR mapped memory */ bxe_deallocate_bars(sc); /* Release the FreeBSD interface. */ if (sc->ifp != NULL) { if_free(sc->ifp); } pci_disable_busmaster(dev); return (0); } /* * Device shutdown function. * * Stops and resets the controller. * * Returns: * Nothing */ static int bxe_shutdown(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); /* stop the periodic callout */ bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); BXE_CORE_UNLOCK(sc); return (0); } void bxe_igu_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id, uint8_t segment, uint16_t index, uint8_t op, uint8_t update) { uint32_t igu_addr = sc->igu_base_addr; igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); } static void bxe_igu_clear_sb_gen(struct bxe_softc *sc, uint8_t func, uint8_t idu_sb_id, uint8_t is_pf) { uint32_t data, ctl, cnt = 100; uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; uint32_t sb_bit = 1 << (idu_sb_id%32); uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; /* Not supported in BC mode */ if (CHIP_INT_MODE_IS_BC(sc)) { return; } data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | (func_encode << IGU_CTRL_REG_FID_SHIFT) | (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(sc, igu_addr_data, data); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(sc, igu_addr_ctl, ctl); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); /* wait for clean up to finish */ while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { DELAY(20000); } if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { BLOGD(sc, DBG_LOAD, "Unable to finish IGU cleanup: " "idu_sb_id %d offset %d bit %d (cnt %d)\n", idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); } } static void bxe_igu_clear_sb(struct bxe_softc *sc, uint8_t idu_sb_id) { bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); } /*******************/ /* ECORE CALLBACKS */ /*******************/ static void bxe_reset_common(struct bxe_softc *sc) { uint32_t val = 0x1400; /* reset_common */ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); } static void bxe_common_init_phy(struct bxe_softc *sc) { uint32_t shmem_base[2]; uint32_t shmem2_base[2]; /* Avoid common init in case MFW supports LFA */ if (SHMEM2_RD(sc, size) > (uint32_t)offsetof(struct shmem2_region, lfa_host_addr[SC_PORT(sc)])) { return; } shmem_base[0] = sc->devinfo.shmem_base; shmem2_base[0] = sc->devinfo.shmem2_base; if (!CHIP_IS_E1x(sc)) { shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); } bxe_acquire_phy_lock(sc); elink_common_init_phy(sc, shmem_base, shmem2_base, sc->devinfo.chip_id, 0); bxe_release_phy_lock(sc); } static void bxe_pf_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~IGU_PF_CONF_FUNC_EN; REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); } static void bxe_init_pxp(struct bxe_softc *sc) { uint16_t devctl; int r_order, w_order; devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); if (sc->mrrs == -1) { r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); } else { BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); r_order = sc->mrrs; } ecore_init_pxp_arb(sc, r_order, w_order); } static uint32_t bxe_get_pretend_reg(struct bxe_softc *sc) { uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); return (base + (SC_ABS_FUNC(sc)) * stride); } /* * Called only on E1H or E2. * When pretending to be PF, the pretend value is the function number 0..7. * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID * combination. 
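* An illustrative sketch of composing such a VF pretend value (the bit
* positions below are assumptions for illustration only and are not
* taken from this file):
*
*     pretend_val = abs_vfid |          <- ABS-VFID in the low bits
*                   (1 << 6)  |         <- VF-valid flag (assumed bit)
*                   (pf_num << 7);      <- owning PF number (assumed)
*     bxe_pretend_func(sc, pretend_val);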
*/ static int bxe_pretend_func(struct bxe_softc *sc, uint16_t pretend_func_val) { uint32_t pretend_reg; if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { return (-1); } /* get my own pretend register */ pretend_reg = bxe_get_pretend_reg(sc); REG_WR(sc, pretend_reg, pretend_func_val); REG_RD(sc, pretend_reg); return (0); } static void bxe_iov_init_dmae(struct bxe_softc *sc) { return; } static void bxe_iov_init_dq(struct bxe_softc *sc) { return; } /* send a NIG loopback debug packet */ static void bxe_lb_pckt(struct bxe_softc *sc) { uint32_t wb_write[3]; /* Ethernet source and destination addresses */ wb_write[0] = 0x55555555; wb_write[1] = 0x55555555; wb_write[2] = 0x20; /* SOP */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); /* NON-IP protocol */ wb_write[0] = 0x09000000; wb_write[1] = 0x55555555; wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); } /* * Some of the internal memories are not directly readable from the driver. * To test them we send debug packets. */ static int bxe_int_mem_test(struct bxe_softc *sc) { int factor; int count, i; uint32_t val = 0; if (CHIP_REV_IS_FPGA(sc)) { factor = 120; } else if (CHIP_REV_IS_EMUL(sc)) { factor = 200; } else { factor = 1; } /* disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send Ethernet packet */ bxe_lb_pckt(sc); /* TODO do i reset NIG statistic? */ /* Wait until NIG register shows 1 packet of size 0x10 */ count = 1000 * factor; while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0x10) { break; } DELAY(10000); count--; } if (val != 0x10) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-1); } /* wait until PRS register shows 1 packet */ count = (1000 * factor); while (count) { val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val == 1) { break; } DELAY(10000); count--; } if (val != 0x1) { BLOGE(sc, "PRS timeout val=0x%x\n", val); return (-2); } /* Reset and init BRB, PRS */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); /* Disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send 10 Ethernet packets */ for (i = 0; i < 10; i++) { bxe_lb_pckt(sc); } /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ count = (1000 * factor); while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0xb0) { break; } DELAY(10000); count--; } if (val != 0xb0) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-3); } /* Wait until PRS register shows 2 packets */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 2) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* Write 1 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); /* Wait until PRS register shows 3 packets */ DELAY(10000 * factor); /* Wait until NIG register shows 1 packet of size 
0x10 */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 3) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* clear NIG EOP FIFO */ for (i = 0; i < 11; i++) { REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); } val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); if (val != 1) { BLOGE(sc, "clear of NIG failed val=0x%x\n", val); return (-4); } /* Reset and init BRB, PRS, NIG */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); if (!CNIC_SUPPORT(sc)) { /* set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); } /* Enable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); REG_WR(sc, CFC_REG_DEBUG0, 0x0); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); return (0); } static void bxe_setup_fan_failure_detection(struct bxe_softc *sc) { int is_required; uint32_t val; int port; is_required = 0; val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & SHARED_HW_CFG_FAN_FAILURE_MASK); if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { is_required = 1; } /* * The fan failure mechanism is usually related to the PHY type since * the power consumption of the board is affected by the PHY. Currently, * fan is required for most designs with SFX7101, BCM8727 and BCM8481. */ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { for (port = PORT_0; port < PORT_MAX; port++) { is_required |= elink_fan_failure_det_req(sc, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, port); } } BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); if (is_required == 0) { return; } /* Fan failure is indicated by SPIO 5 */ bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); /* set to active low mode */ val = REG_RD(sc, MISC_REG_SPIO_INT); val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); REG_WR(sc, MISC_REG_SPIO_INT, val); /* enable interrupt to signal the IGU */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); val |= MISC_SPIO_SPIO5; REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); } static void bxe_enable_blocks_attention(struct bxe_softc *sc) { uint32_t val; REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); } else { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); } REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* * mask read length error interrupts in brb for parser * (parsing unit and 'checksum and crc' unit) * these errors are legal (PU reads fixed length and CAC can cause * read length error on truncated packets) */ REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); REG_WR(sc, QM_REG_QM_INT_MASK, 0); REG_WR(sc, TM_REG_TM_INT_MASK, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 
PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); if (!CHIP_IS_E1x(sc)) { val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); } REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ if (!CHIP_IS_E1x(sc)) { /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); } REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ } /** * bxe_init_hw_common - initialize the HW at the COMMON phase. * * @sc: driver handle */ static int bxe_init_hw_common(struct bxe_softc *sc) { uint8_t abs_func_id; uint32_t val; BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", SC_ABS_FUNC(sc)); /* * take the RESET lock to protect undi_unload flow from accessing * registers while we are resetting the chip */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); bxe_reset_common(sc); REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); val = 0xfffc; if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); BLOGD(sc, DBG_LOAD, "after misc block init\n"); if (!CHIP_IS_E1x(sc)) { /* * 4-port mode or 2-port mode we need to turn off master-enable for * everyone. After that we turn it back on for self. So, we disregard * multi-function, and always disable all functions on the given path, * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 */ for (abs_func_id = SC_PATH(sc); abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { if (abs_func_id == SC_ABS_FUNC(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); continue; } bxe_pretend_func(sc, abs_func_id); /* clear pf enable */ bxe_pf_disable(sc); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); } } BLOGD(sc, DBG_LOAD, "after pf disable\n"); ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); if (CHIP_IS_E1(sc)) { /* * enable HW interrupt from PXP on USDM overflow * bit 16 on INT_MASK_0 */ REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); } ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); bxe_init_pxp(sc); #ifdef __BIG_ENDIAN REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); /* make sure this value is 0 */ REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); #endif ecore_ilt_init_page_size(sc, INITOP_SET); if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); } /* let the HW do it's magic... */ DELAY(100000); /* finish PXP init */ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); if (val != 1) { BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", val); return (-1); } val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); if (val != 1) { BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); return (-1); } BLOGD(sc, DBG_LOAD, "after pxp init\n"); /* * Timer bug workaround for E2 only. 
We need to set the entire ILT to have * entries with value "0" and valid bit on. This needs to be done by the * first PF that is loaded in a path (i.e. common phase) */ if (!CHIP_IS_E1x(sc)) { /* * In E2 there is a bug in the timers block that can cause function 6 / 7 * (i.e. vnic3) to start even if it is marked as "scan-off". * This occurs when a different function (func2,3) is being marked * as "scan-off". Real-life scenario for example: if a driver is being * load-unloaded while func6,7 are down. This will cause the timer to access * the ilt, translate to a logical address and send a request to read/write. * Since the ilt for the function that is down is not valid, this will cause * a translation error which is unrecoverable. * The Workaround is intended to make sure that when this happens nothing * fatal will occur. The workaround: * 1. First PF driver which loads on a path will: * a. After taking the chip out of reset, by using pretend, * it will write "0" to the following registers of * the other vnics. * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); * And for itself it will write '1' to * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable * dmae-operations (writing to pram for example.) * note: can be done for only function 6,7 but cleaner this * way. * b. Write zero+valid to the entire ILT. * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of * VNIC3 (of that port). The range allocated will be the * entire ILT. This is needed to prevent ILT range error. * 2. Any PF driver load flow: * a. ILT update with the physical addresses of the allocated * logical pages. * b. Wait 20msec. - note that this timeout is needed to make * sure there are no requests in one of the PXP internal * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. (could have * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: * a. Clear the Timers scan_en. * b. Polling for scan_on=0 for that PF. * c. Clear the PF enable bit in the PXP. * d. Clear the PF enable in the CFC (WEAK + STRONG) * e. Write zero+valid to all ILT entries (The valid bit must * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non-recoverable. * In the future there will be a recovery routine for this error. * Currently attention is masked. * Having an MCP lock on the load/unload process does not guarantee that * there is no Timer disable during Func6/7 enable. This is because the * Timers scan is currently being cleared by the MCP on FLR. * Step 2.d can be done only for PF6/7 and the driver can also check if * there is an error before clearing it. But the flow above is simpler and * more general. * All ILT entries are written by zero+valid and not just PF6/7 * ILT entries since in the future the ILT entries allocation for * PF-s might be dynamic.
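* A condensed sketch of steps 1.b/1.c as implemented directly below
* (all names are taken from the code that follows): a dummy TM ILT
* client covering the entire ILT is cleared while pretending to be
* VNIC3 of this path (function 6/7):
*
*     ilt_cli.start = 0;
*     ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
*     ilt_cli.client_num = ILT_CLIENT_TM;
*     bxe_pretend_func(sc, (SC_PATH(sc) + 6));
*     ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
*     bxe_pretend_func(sc, SC_ABS_FUNC(sc));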
*/ struct ilt_client_info ilt_cli; struct ecore_ilt ilt; memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); memset(&ilt, 0, sizeof(struct ecore_ilt)); /* initialize dummy TM client */ ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; /* * Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th * vnic (this code assumes existence of the vnic) * * both steps performed by call to ecore_ilt_client_init_op() * with dummy TM client * * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT * and his brother are split registers */ bxe_pretend_func(sc, (SC_PATH(sc) + 6)); ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); if (!CHIP_IS_E1x(sc)) { int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : (CHIP_REV_IS_FPGA(sc) ? 400 : 0); ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); /* let the HW do it's magic... */ do { DELAY(200000); val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); } while (factor-- && (val != 1)); if (val != 1) { BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); return (-1); } } BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); bxe_iov_init_dmae(sc); /* clean the DMAE memory */ sc->dmae_ready = 1; ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); /* QM queues pointers table */ ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); /* soft reset pulse */ REG_WR(sc, QM_REG_SOFT_RESET, 1); REG_WR(sc, QM_REG_SOFT_RESET, 0); if (CNIC_SUPPORT(sc)) ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); if (!CHIP_REV_IS_SLOW(sc)) { /* enable hw interrupt from doorbell Q */ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); } ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); if (!CHIP_IS_E1(sc)) { REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); } if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * received in AFEX mode */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); } else { /* * Bit-map indicating which L2 hdrs may appear * after the basic Ethernet header */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); } } ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { /* reset VFC memories */ REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); DELAY(20000); } ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); /* sync semi rtc */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * sent in AFEX mode */ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); } else { REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); } } REG_WR(sc, SRC_REG_SOFT_RST, 1); ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); if (CNIC_SUPPORT(sc)) { REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); } REG_WR(sc, SRC_REG_SOFT_RST, 0); if (sizeof(union cdu_context) != 1024) { /* we currently assume that a context is 1024 bytes */ BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", (long)sizeof(union cdu_context)); } ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); /* enable context validation interrupt from CFC */ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* set the thresholds to prevent CFC/CDU race */ REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); } ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); /* Reset PCIE errors for debug */ REG_WR(sc, 0x2814, 0xffffffff); REG_WR(sc, 0x3820, 0xffffffff); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); } ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); if (!CHIP_IS_E1(sc)) { /* in E3 this done in per-port section */ if (!CHIP_IS_E3(sc)) REG_WR(sc, 
NIG_REG_LLH_MF_MODE, IS_MF(sc)); } if (CHIP_IS_E1H(sc)) { /* not applicable for E2 (and above ...) */ REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); } if (CHIP_REV_IS_SLOW(sc)) { DELAY(200000); } /* finish CFC init */ val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); return (-1); } REG_WR(sc, CFC_REG_DEBUG0, 0); if (CHIP_IS_E1(sc)) { /* read NIG statistic to see if this is our first up since powerup */ bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); /* do internal memory self test */ if ((val == 0) && bxe_int_mem_test(sc)) { BLOGE(sc, "internal mem self test failed val=0x%x\n", val); return (-1); } } bxe_setup_fan_failure_detection(sc); /* clear PXP2 attentions */ REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); bxe_enable_blocks_attention(sc); if (!CHIP_REV_IS_SLOW(sc)) { ecore_enable_blocks_parity(sc); } if (!BXE_NOMCP(sc)) { if (CHIP_IS_E1x(sc)) { bxe_common_init_phy(sc); } } return (0); } /** * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. * * @sc: driver handle */ static int bxe_init_hw_common_chip(struct bxe_softc *sc) { int rc = bxe_init_hw_common(sc); if (rc) { BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); return (rc); } /* In E2 2-PORT mode, same ext phy is used for the two paths */ if (!BXE_NOMCP(sc)) { bxe_common_init_phy(sc); } return (0); } static int bxe_init_hw_port(struct bxe_softc *sc) { int port = SC_PORT(sc); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; uint32_t low, high; uint32_t val; BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); /* * Timers bug workaround: disables the pf_master bit in pglue at * common phase, we need to enable it here before any dmae access are * attempted. Therefore we manually added the enable-master to the * port phase (it also happens in the function phase) */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); } ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); /* QM cid (connection) count */ ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_TM, init_phase); REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); } ecore_init_block(sc, BLOCK_DORQ, init_phase); ecore_init_block(sc, BLOCK_BRB1, init_phase); if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { if (IS_MF(sc)) { low = (BXE_ONE_PORT(sc) ? 160 : 246); } else if (sc->mtu > 4096) { if (BXE_ONE_PORT(sc)) { low = 160; } else { val = sc->mtu; /* (24*1024 + val*4)/256 */ low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); } } else { low = (BXE_ONE_PORT(sc) ? 
80 : 160); } high = (low + 56); /* 14*1024/256 */ REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); } if (CHIP_IS_MODE_4_PORT(sc)) { REG_WR(sc, SC_PORT(sc) ? BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0, 40); } ecore_init_block(sc, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* configure headers for AFEX mode */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); REG_WR(sc, SC_PORT(sc) ? PRS_REG_MUST_HAVE_HDRS_PORT_1 : PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); } else { /* Ovlan exists only if we are in multi-function + * switch-dependent mode, in switch-independent there * is no ovlan headers */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); } } ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (CHIP_IS_E1x(sc)) { /* configure PBF to work without PAUSE mtu 9000 */ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); /* update threshold */ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); /* update init credit */ REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); /* probe changes */ REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); DELAY(50); REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); } if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_SRC, init_phase); } ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (CHIP_IS_E1(sc)) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); ecore_init_block(sc, BLOCK_IGU, init_phase); ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: * - SF mode: bits 3-7 are masked. only bits 0-2 are in use * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(sc) ? 0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ val |= CHIP_IS_E1(sc) ? 0 : 0x10; REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); ecore_init_block(sc, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(sc)) { /* Bit-map indicating which L2 hdrs may appear after the * basic Ethernet header */ if (IS_MF_AFEX(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); } else { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, IS_MF_SD(sc) ? 7 : 6); } if (CHIP_IS_E3(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_LLH1_MF_MODE : NIG_REG_LLH_MF_MODE, IS_MF(sc)); } } if (!CHIP_IS_E3(sc)) { REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); } if (!CHIP_IS_E1(sc)) { /* 0x2 disable mf_ov, 0x1 enable */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, (IS_MF_SD(sc) ? 
0x1 : 0x2)); if (!CHIP_IS_E1x(sc)) { val = 0; switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: val = 1; break; case MULTI_FUNCTION_SI: case MULTI_FUNCTION_AFEX: val = 2; break; } REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : NIG_REG_LLH0_CLS_TYPE), val); } REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); } /* If SPIO5 is set to generate interrupts, enable it for this port */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); if (val & MISC_SPIO_SPIO5) { uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(sc, reg_addr); val |= AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_addr, val); } return (0); } static uint32_t bxe_flr_clnup_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected, uint32_t poll_count) { uint32_t cur_cnt = poll_count; uint32_t val; while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); } return (val); } static int bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, uint32_t reg, char *msg, uint32_t poll_cnt) { uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); if (val != 0) { BLOGE(sc, "%s usage count=%d\n", msg, val); return (1); } return (0); } /* Common routines with VF FLR cleanup */ static uint32_t bxe_flr_clnup_poll_count(struct bxe_softc *sc) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(sc)) { return (FLR_POLL_CNT * 2000); } if (CHIP_REV_IS_FPGA(sc)) { return (FLR_POLL_CNT * 120); } return (FLR_POLL_CNT); } static int bxe_poll_hw_usage_counters(struct bxe_softc *sc, uint32_t poll_cnt) { /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bxe_flr_clnup_poll_hw_counter(sc, CFC_REG_NUM_LCIDS_INSIDE_PF, "CFC PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, DORQ_REG_PF_USAGE_CNT, "DQ PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), "QM PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), "Timers VNIC usage counter timed out", poll_cnt)) { return (1); } if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), "Timers NUM_SCANS usage counter timed out", poll_cnt)) { return (1); } /* Wait for DMAE PF usage counter to zero */ if (bxe_flr_clnup_poll_hw_counter(sc, dmae_reg_go_c[INIT_DMAE_C(sc)], "DMAE command register timed out", poll_cnt)) { return (1); } return (0); } #define OP_GEN_PARAM(param) \ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) #define OP_GEN_TYPE(type) \ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) static int bxe_send_final_clnup(struct bxe_softc *sc, uint8_t clnup_func, uint32_t poll_cnt) { uint32_t op_gen_command = 0; uint32_t comp_addr = (BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); int ret = 0; if (REG_RD(sc, comp_addr)) { BLOGE(sc, "Cleanup complete was not 0 before sending\n"); return (1); } op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); op_gen_command |=
OP_GEN_AGG_VECT(clnup_func); op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { BLOGE(sc, "FW final cleanup did not succeed\n"); BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", (REG_RD(sc, comp_addr))); bxe_panic(sc, ("FLR cleanup failed\n")); return (1); } /* Zero completion for nxt FLR */ REG_WR(sc, comp_addr, 0); return (ret); } static void bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, struct pbf_pN_buf_regs *regs, uint32_t poll_count) { uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; uint32_t cur_cnt = poll_count; crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); crd = crd_start = REG_RD(sc, regs->crd); init_crd = REG_RD(sc, regs->init_crd); BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); while ((crd != init_crd) && ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < (init_crd - crd_start))) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); crd = REG_RD(sc, regs->crd); crd_freed = REG_RD(sc, regs->crd_freed); } else { BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, struct pbf_pN_cmd_regs *regs, uint32_t poll_count) { uint32_t occup, to_free, freed, freed_start; uint32_t cur_cnt = poll_count; occup = to_free = REG_RD(sc, regs->lines_occup); freed = freed_start = REG_RD(sc, regs->lines_freed); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); while (occup && ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); occup = REG_RD(sc, regs->lines_occup); freed = REG_RD(sc, regs->lines_freed); } else { BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT} }; struct pbf_pN_buf_regs buf_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD , (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, (CHIP_IS_E3B0(sc)) ? 
PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, }; int i; /* Verify the command queues are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); } /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); } } static void bxe_hw_enable_status(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); val = REG_RD(sc, PBF_REG_DISABLE_PF); BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); } static int bxe_pf_flr_clnup(struct bxe_softc *sc) { uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); /* Re-enable PF target read access */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Poll HW usage counters */ BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { return (-1); } /* Zero the igu 'trailing edge' and 'leading edge' */ /* Send the FW cleanup command */ if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { return (-1); } /* ATC cleanup */ /* Verify TX hw is flushed */ bxe_tx_hw_flushed(sc, poll_cnt); /* Wait 100ms (not adjusted according to platform) */ DELAY(100000); /* Verify no pending pci transactions */ if (bxe_is_pcie_pending(sc)) { BLOGE(sc, "PCIE Transactions still pending\n"); } /* Debug */ bxe_hw_enable_status(sc); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); return (0); } static int bxe_init_hw_func(struct bxe_softc *sc) { int port = SC_PORT(sc); int func = SC_FUNC(sc); int init_phase = PHASE_PF0 + func; struct ecore_ilt *ilt = sc->ilt; uint16_t cdu_ilt_start; uint32_t addr, val; uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; int i, main_mem_width, rc; BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); /* FLR cleanup */ if (!CHIP_IS_E1x(sc)) { rc = bxe_pf_flr_clnup(sc); if (rc) { BLOGE(sc, "FLR cleanup failed!\n"); // XXX bxe_fw_dump(sc); // XXX bxe_idle_chk(sc); return (rc); } } /* set MSI reconfigure capability */ if (sc->devinfo.int_block == INT_BLOCK_HC) { addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); val = REG_RD(sc, addr); val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; REG_WR(sc, addr, val); } ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); ilt = sc->ilt; cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(sc); i++) { ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = sc->context[i].vcxt_dma.paddr; ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; } ecore_ilt_init_op(sc, INITOP_SET); /* Set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); if (!CHIP_IS_E1x(sc)) { uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; /* Turn on a single ISR mode in IGU if driver is going to use * INT#x or MSI */ if (sc->interrupt_mode != INTR_MODE_MSIX) { pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; } /* * Timers workaround bug: function init part. * Need to wait 20msec after initializing ILT, * needed to make sure there are no requests in * one of the PXP internal queues with "old" ILT addresses */ DELAY(20000); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function * init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); /* Enable the function in IGU */ REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); } sc->dmae_ready = 1; ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_NIG, init_phase); ecore_init_block(sc, BLOCK_SRC, init_phase); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, QM_REG_PF_EN, 1); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); } ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TM, init_phase); ecore_init_block(sc, BLOCK_DORQ, init_phase); bxe_iov_init_dq(sc); ecore_init_block(sc, BLOCK_BRB1, init_phase); ecore_init_block(sc, BLOCK_PRS, init_phase); ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PBF_REG_DISABLE_PF, 0); ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); } ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* HC init per function */ if (sc->devinfo.int_block == INT_BLOCK_HC) { if (CHIP_IS_E1H(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); REG_WR(sc, HC_REG_LEADING_EDGE_0 
+ port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); } else { int num_segs, sb_idx, prod_offset; REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } ecore_init_block(sc, BLOCK_IGU, init_phase); if (!CHIP_IS_E1x(sc)) { int dsb_idx = 0; /** * Producer memory: * E2 mode: addresses 0-135 map to the mapping memory; * 136 - PF0 default prod; 137 - PF1 default prod; * 138 - PF2 default prod; 139 - PF3 default prod; * 140 - PF0 attn prod; 141 - PF1 attn prod; * 142 - PF2 attn prod; 143 - PF3 attn prod; * 144-147 reserved. * * E1.5 mode - In backward compatible mode; * for non default SB; each even line in the memory * holds the U producer and each odd line holds * the C producer. The first 128 producers are for * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 * producers are for the DSB for each PF. * Each PF has five segments: (the order inside each * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; * 132-135 C prods; 136-139 X prods; 140-143 T prods; * 144-147 attn prods; */ /* non-default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { prod_offset = (sc->igu_base_sb + sb_idx) * num_segs; for (i = 0; i < num_segs; i++) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i) * 4; REG_WR(sc, addr, 0); } /* send consumer update with value 0 */ bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); } /* default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; if (CHIP_IS_MODE_4_PORT(sc)) dsb_idx = SC_FUNC(sc); else dsb_idx = SC_VN(sc); prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_BASE_DSB_PROD + dsb_idx : IGU_NORM_BASE_DSB_PROD + dsb_idx); /* * IGU prods come in chunks of E1HVN_MAX (4) - * it does not matter what the current chip mode is */ for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i)*4; REG_WR(sc, addr, 0); } /* send consumer update with 0 */ if (CHIP_INT_MODE_IS_BC(sc)) { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, CSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, XSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, TSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } else { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } bxe_igu_clear_sb(sc, sc->igu_dsb_id); /* !!!
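As a cross-check on the producer bookkeeping, here is a minimal standalone sketch (not driver code) of which default-SB producer lines the loop above clears. The base offsets (136 in E2/normal mode, 128 in E1.5 backward-compatible mode) and the segment counts (2 and 5) are read off the layout comment; it is assumed here that IGU_NORM_BASE_DSB_PROD, IGU_BC_BASE_DSB_PROD and the *_DSB_NUM_SEGS constants carry exactly those values.

/* Sketch of the DSB producer-clear arithmetic above; values taken
 * from the producer-memory layout comment, not from the headers. */
#include <stdio.h>

int
main(void)
{
    int bc_mode = 0;                  /* 0 = E2 "normal", 1 = E1.5 BC mode */
    int dsb_idx = 2;                  /* PF2 (4-port) or VN2 (2-port) */
    int base = bc_mode ? 128 : 136;   /* IGU_{BC,NORM}_BASE_DSB_PROD */
    int num_segs = bc_mode ? 5 : 2;   /* IGU_{BC,NORM}_DSB_NUM_SEGS */
    int e1hvn_max = 4;                /* prods come in chunks of 4 */
    int i;

    for (i = 0; i < num_segs * e1hvn_max; i += e1hvn_max)
        printf("clear producer line %d\n", base + dsb_idx + i);
    /* E2 mode prints 138 and 142: the PF2 default prod and PF2 attn
     * prod listed in the layout comment above. */
    return (0);
}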
these should become driver const once rf-tool supports split-68 const */ REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); } } /* Reset PCIE errors for debug */ REG_WR(sc, 0x2114, 0xffffffff); REG_WR(sc, 0x2120, 0xffffffff); if (CHIP_IS_E1x(sc)) { main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ main_mem_base = HC_REG_MAIN_MEMORY + SC_PORT(sc) * (main_mem_size * 4); main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; main_mem_width = 8; val = REG_RD(sc, main_mem_prty_clr); if (val) { BLOGD(sc, DBG_LOAD, "Parity errors in HC block during function init (0x%x)!\n", val); } /* Clear "false" parity errors in MSI-X table */ for (i = main_mem_base; i < main_mem_base + main_mem_size * 4; i += main_mem_width) { bxe_read_dmae(sc, i, main_mem_width / 4); bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), i, main_mem_width / 4); } /* Clear HC parity attention */ REG_RD(sc, main_mem_prty_clr); } #if 1 /* Enable STORMs SP logging */ REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); #endif elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_reset(struct bxe_softc *sc) { if (!BXE_NOMCP(sc)) { bxe_acquire_phy_lock(sc); elink_lfa_reset(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } else { if (!CHIP_REV_IS_SLOW(sc)) { BLOGW(sc, "Bootcode is missing - cannot reset link\n"); } } } static void bxe_reset_port(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; /* reset physical Link */ bxe_link_reset(sc); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); /* Do not rcv packets to BRB */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); /* Do not direct rcv packets that are not for MCP to the BRB */ REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); /* Configure AEU */ REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); DELAY(100000); /* Check for BRB port occupancy */ val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); if (val) { BLOGD(sc, DBG_LOAD, "BRB1 is not empty, %d blocks are occupied\n", val); } /* TODO: Close Doorbell port? 
*/ } static void bxe_ilt_wr(struct bxe_softc *sc, uint32_t index, bus_addr_t addr) { int reg; uint32_t wb_write[2]; if (CHIP_IS_E1(sc)) { reg = PXP2_REG_RQ_ONCHIP_AT + index*8; } else { reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; } wb_write[0] = ONCHIP_ADDR1(addr); wb_write[1] = ONCHIP_ADDR2(addr); REG_WR_DMAE(sc, reg, wb_write, 2); } static void bxe_clear_func_ilt(struct bxe_softc *sc, uint32_t func) { uint32_t i, base = FUNC_ILT_BASE(func); for (i = base; i < base + ILT_PER_FUNC; i++) { bxe_ilt_wr(sc, i, 0); } } static void bxe_reset_func(struct bxe_softc *sc) { struct bxe_fastpath *fp; int port = SC_PORT(sc); int func = SC_FUNC(sc); int i; /* Disable the function in the FW */ REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); /* FP SBs */ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), SB_DISABLED); } /* SP SB */ REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); } /* Configure IGU */ if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } else { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } if (CNIC_LOADED(sc)) { /* Disable Timer scan */ REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); /* * Wait for at least 10ms and up to 2 seconds for the timers * scan to complete */ for (i = 0; i < 200; i++) { DELAY(10000); if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) break; } } /* Clear ILT */ bxe_clear_func_ilt(sc, func); /* * Timers workaround bug for E2: if this is vnic-3, * we need to set the entire ilt range for these timers. */ if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { struct ilt_client_info ilt_cli; /* use dummy TM client */ memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); } /* this assumes that reset_port() is called before reset_func() */ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } sc->dmae_ready = 0; } static int bxe_gunzip_init(struct bxe_softc *sc) { return (0); } static void bxe_gunzip_end(struct bxe_softc *sc) { return; } static int bxe_init_firmware(struct bxe_softc *sc) { if (CHIP_IS_E1(sc)) { ecore_init_e1_firmware(sc); sc->iro_array = e1_iro_arr; } else if (CHIP_IS_E1H(sc)) { ecore_init_e1h_firmware(sc); sc->iro_array = e1h_iro_arr; } else if (!CHIP_IS_E1x(sc)) { ecore_init_e2_firmware(sc); sc->iro_array = e2_iro_arr; } else { BLOGE(sc, "Unsupported chip revision\n"); return (-1); } return (0); } static void bxe_release_firmware(struct bxe_softc *sc) { /* Do nothing */ return; } static int ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len) { /* XXX : Implement...
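bxe_ilt_wr() above depends on the ONCHIP_ADDR1()/ONCHIP_ADDR2() split defined later in bxe.h: the physical address is shifted right 12 bits, the remaining high bits land in a second 32-bit word together with a valid bit, and the pair is written as one wide register. A worked example, with the macro bodies copied from the header and an arbitrary sample address:

#include <stdint.h>
#include <stdio.h>

/* copied from bxe.h further down in this diff */
#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44)))

int
main(void)
{
    uint64_t paddr = 0x123456789000ULL; /* arbitrary page-aligned address */

    /* wb_write[0]: address bits 12..43 */
    printf("0x%08x\n", ONCHIP_ADDR1(paddr)); /* prints 0x23456789 */
    /* wb_write[1]: address bits 44 and up, plus the valid bit at bit 20 */
    printf("0x%08x\n", ONCHIP_ADDR2(paddr)); /* prints 0x00100001 */
    return (0);
}

REG_WR_DMAE(sc, reg, wb_write, 2) then pushes both words to the chip in a single DMAE transaction.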
*/ BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); return (FALSE); } static void ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { bxe_reg_wr_ind(sc, addr, val); } static void ecore_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { bxe_write_dmae_phys_len(sc, phys_addr, addr, len); } void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, size_t size, uint32_t *data) { uint8_t i; for (i = 0; i < size/4; i++) { REG_WR(sc, addr + (i * 4), data[i]); } } /* * character device - ioctl interface definitions */ #include "bxe_dump.h" #include "bxe_ioctl.h" #include static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); static struct cdevsw bxe_cdevsw = { .d_version = D_VERSION, .d_ioctl = bxe_eioctl, .d_name = "bxecnic", }; #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1)) #define DUMP_ALL_PRESETS 0x1FFF #define DUMP_MAX_PRESETS 13 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) #define IS_REG_IN_PRESET(presets, idx) \ ((presets & (1 << (idx-1))) == (1 << (idx-1))) static int bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) { if (CHIP_IS_E1(sc)) return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(sc)) return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(sc)) return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(sc)) return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(sc)) return dump_num_registers[4][preset-1]; else return 0; } static int bxe_get_total_regs_len32(struct bxe_softc *sc) { uint32_t preset_idx; int regdump_len32 = 0; /* Calculate the total preset regs length */ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); } return regdump_len32; } static const uint32_t * __bxe_get_page_addr_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_vals_e2; else if (CHIP_IS_E3(sc)) return page_vals_e3; else return NULL; } static uint32_t __bxe_get_page_reg_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_MODE_VALUES_E2; else if (CHIP_IS_E3(sc)) return PAGE_MODE_VALUES_E3; else return 0; } static const uint32_t * __bxe_get_page_write_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_write_regs_e2; else if (CHIP_IS_E3(sc)) return page_write_regs_e3; else return NULL; } static uint32_t __bxe_get_page_write_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_WRITE_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_WRITE_REGS_E3; else return 0; } static const struct reg_addr * __bxe_get_page_read_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_read_regs_e2; else if (CHIP_IS_E3(sc)) return page_read_regs_e3; else return NULL; } static uint32_t __bxe_get_page_read_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_READ_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_READ_REGS_E3; else return 0; } static bool bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(reg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(reg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(reg_info->chips); else if (CHIP_IS_E3A0(sc)) return 
IS_E3A0_REG(reg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(reg_info->chips); else return 0; } static bool bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(wreg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(wreg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(wreg_info->chips); else if (CHIP_IS_E3A0(sc)) return IS_E3A0_REG(wreg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(wreg_info->chips); else return 0; } /** * bxe_read_pages_regs - read "paged" registers * * @bp device handle * @p output buffer * * Reads "paged" memories: memories that may only be read by first writing to a * specific address ("write address") and then reading from a specific address * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. */ static void bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, k, n; /* addresses of the paged registers */ const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); /* number of paged registers */ int num_pages = __bxe_get_page_reg_num(sc); /* write addresses */ const uint32_t *write_addr = __bxe_get_page_write_ar(sc); /* number of write addresses */ int write_num = __bxe_get_page_write_num(sc); /* read addresses info */ const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); /* number of read addresses */ int read_num = __bxe_get_page_read_num(sc); uint32_t addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(sc, write_addr[j], page_addr[i]); for (k = 0; k < read_num; k++) { if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { size = read_addr[k].size; for (n = 0; n < size; n++) { addr = read_addr[k].addr + n*4; *p++ = REG_RD(sc, addr); } } } } } return; } static int bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, addr; const struct wreg_addr *wreg_addr_p = NULL; if (CHIP_IS_E1(sc)) wreg_addr_p = &wreg_addr_e1; else if (CHIP_IS_E1H(sc)) wreg_addr_p = &wreg_addr_e1h; else if (CHIP_IS_E2(sc)) wreg_addr_p = &wreg_addr_e2; else if (CHIP_IS_E3A0(sc)) wreg_addr_p = &wreg_addr_e3; else if (CHIP_IS_E3B0(sc)) wreg_addr_p = &wreg_addr_e3b0; else return (-1); /* Read the idle_chk registers */ for (i = 0; i < IDLE_REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { for (j = 0; j < idle_reg_addrs[i].size; j++) *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); } } /* Read the regular registers */ for (i = 0; i < REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, ®_addrs[i]) && IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); } } /* Read the CAM registers */ if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { for (i = 0; i < wreg_addr_p->size; i++) { *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); /* In case of wreg_addr register, read additional registers from read_regs array */ for (j = 0; j < wreg_addr_p->read_regs_count; j++) { addr = *(wreg_addr_p->read_regs); *p++ = REG_RD(sc, addr + j*4); } } } /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { /* Read "paged" registers */ bxe_read_pages_regs(sc, p, preset); } return 0; } -int +static int bxe_grc_dump(struct bxe_softc *sc) { int rval = 0; uint32_t preset_idx; uint8_t *buf; uint32_t size; struct dump_header *d_hdr; 
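/*
 * Buffer layout written below: one struct dump_header immediately
 * followed by the raw register words of each collected preset, in
 * ascending preset order.  Presets 2, 5, 8 and 11 (the IOR presets)
 * are skipped, while bxe_get_total_regs_len32() sums all 13 presets,
 * so the allocation is an upper bound on what is actually written.
 */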
- uint32_t i; - uint32_t reg_val; - uint32_t reg_addr; - uint32_t cmd_offset; - int context_size; - int allocated; - struct ecore_ilt *ilt = SC_ILT(sc); - struct bxe_fastpath *fp; - struct ilt_client_info *ilt_cli; - int grc_dump_size; - - if (sc->grcdump_done || sc->grcdump_started) + if (sc->grcdump_done) return (rval); - sc->grcdump_started = 1; - BLOGI(sc, "Started collecting grcdump\n"); - - grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + - sizeof(struct dump_header); - - sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); - - if (sc->grc_dump == NULL) { - BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); - return(ENOMEM); - } - - - - /* Disable parity attentions as long as following dump may - * cause false alarms by reading never written registers. We - * will re-enable parity attentions right after the dump. - */ - - /* Disable parity on path 0 */ - bxe_pretend_func(sc, 0); - ecore_disable_blocks_parity(sc); - /* Disable parity on path 1 */ - bxe_pretend_func(sc, 1); - ecore_disable_blocks_parity(sc); - - /* Return to current function */ - bxe_pretend_func(sc, SC_ABS_FUNC(sc)); - buf = sc->grc_dump; d_hdr = sc->grc_dump; d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; d_hdr->version = BNX2X_DUMP_VERSION; d_hdr->preset = DUMP_ALL_PRESETS; if (CHIP_IS_E1(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1; } else if (CHIP_IS_E1H(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1H; } else if (CHIP_IS_E2(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E2 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3A0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3B0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } buf += sizeof(struct dump_header); for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { /* Skip presets with IOR */ if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || (preset_idx == 11)) continue; - rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); + rval = bxe_get_preset_regs(sc, sc->grc_dump, preset_idx); if (rval) break; size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); buf += size; } - bxe_pretend_func(sc, 0); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); - bxe_pretend_func(sc, 1); - ecore_clear_blocks_parity(sc); - ecore_enable_blocks_parity(sc); - - /* Return to current function */ - bxe_pretend_func(sc, SC_ABS_FUNC(sc)); - - - context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); - for (i = 0, allocated = 0; allocated < context_size; i++) { - - BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i, - sc->context[i].vcxt_dma.paddr, sc->context[i].vcxt_dma.vaddr, - sc->context[i].size); - allocated += sc->context[i].size; - } - BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", - (uintmax_t)sc->fw_stats_req_mapping, - (uintmax_t)sc->fw_stats_data_mapping, - sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); - BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%lx\n", - (void *)sc->def_sb_dma.paddr, sc->def_sb, - sizeof(struct host_sp_status_block)); - BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", - sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); - BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%lx\n", - sc->sp_dma.paddr, sc->sp_dma.vaddr, sizeof(struct bxe_slowpath)); - BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", - sc->spq_dma.paddr, 
sc->spq_dma.vaddr, BCM_PAGE_SIZE); - BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", - sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, FW_BUF_SIZE); - for (i = 0; i < sc->num_queues; i++) { - fp = &sc->fp[i]; - BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%lx\n", i, - fp->sb_dma.paddr, fp->sb_dma.vaddr, - sizeof(union bxe_host_hc_status_block)); - BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, - fp->tx_dma.paddr, fp->tx_dma.vaddr, - (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); - BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, - fp->rx_dma.paddr, fp->rx_dma.vaddr, - (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); - BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%lx\n", i, - fp->rcq_dma.paddr, fp->rcq_dma.vaddr, - (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); - BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, - fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, - (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); - } - - ilt_cli = &ilt->clients[1]; - for (i = ilt_cli->start; i <= ilt_cli->end; i++) { - BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", - ((struct bxe_dma *)((&ilt->lines[i])->page))->paddr, - ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); - } - - - cmd_offset = DMAE_REG_CMD_MEM; - for (i = 0; i < 224; i++) { - reg_addr = (cmd_offset +(i * 4)); - reg_val = REG_RD(sc, reg_addr); - BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, - reg_addr, reg_val); - } - - - BLOGI(sc, "Collection of grcdump done\n"); sc->grcdump_done = 1; return(rval); } static int bxe_add_cdev(struct bxe_softc *sc) { + int grc_dump_size; + + grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + + sizeof(struct dump_header); + + sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); + + if (sc->grc_dump == NULL) + return (-1); + sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); if (sc->eeprom == NULL) { BLOGW(sc, "Unable to alloc for eeprom size buffer\n"); + free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; return (-1); } sc->ioctl_dev = make_dev(&bxe_cdevsw, sc->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(sc->ifp)); if (sc->ioctl_dev == NULL) { + + free(sc->grc_dump, M_DEVBUF); free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; + return (-1); } sc->ioctl_dev->si_drv1 = sc; return (0); } static void bxe_del_cdev(struct bxe_softc *sc) { if (sc->ioctl_dev != NULL) destroy_dev(sc->ioctl_dev); + if (sc->grc_dump != NULL) + free(sc->grc_dump, M_DEVBUF); + if (sc->eeprom != NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; } - sc->ioctl_dev = NULL; return; } static bool bxe_is_nvram_accessible(struct bxe_softc *sc) { if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return FALSE; return TRUE; } static int bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) { int rval = 0; switch (eeprom->eeprom_cmd) { case BXE_EEPROM_CMD_SET_EEPROM: rval = copyin(eeprom->eeprom_data, sc->eeprom, eeprom->eeprom_data_len); 
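/* copyin() stages the user buffer through sc->eeprom before the NVRAM
 * write; the GET path below mirrors this with bxe_rd_eeprom() followed
 * by copyout(). */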
if (rval) break; rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); break; case BXE_EEPROM_CMD_GET_EEPROM: rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); if (rval) { break; } rval = copyout(sc->eeprom, eeprom->eeprom_data, eeprom->eeprom_data_len); break; default: rval = EINVAL; break; } if (rval) { BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); } return (rval); } static int bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p) { uint32_t ext_phy_config; int port = SC_PORT(sc); int cfg_idx = bxe_get_link_cfg_idx(sc); dev_p->supported = sc->port.supported[cfg_idx] | (sc->port.supported[cfg_idx ^ 1] & (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE)); dev_p->advertising = sc->port.advertising[cfg_idx]; if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == ELINK_ETH_PHY_SFP_1G_FIBER) { dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full); dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); } if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && !(sc->flags & BXE_MF_FUNC_DIS)) { dev_p->duplex = sc->link_vars.duplex; if (IS_MF(sc) && !BXE_NOMCP(sc)) dev_p->speed = bxe_get_mf_speed(sc); else dev_p->speed = sc->link_vars.line_speed; } else { dev_p->duplex = DUPLEX_UNKNOWN; dev_p->speed = SPEED_UNKNOWN; } dev_p->port = bxe_media_detect(sc); ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) dev_p->phy_address = sc->port.phy_addr; else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); else dev_p->phy_address = 0; if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) dev_p->autoneg = AUTONEG_ENABLE; else dev_p->autoneg = AUTONEG_DISABLE; return 0; } static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct bxe_softc *sc; int rval = 0; device_t pci_dev; bxe_grcdump_t *dump = NULL; int grc_dump_size; bxe_drvinfo_t *drv_infop = NULL; bxe_dev_setting_t *dev_p; bxe_dev_setting_t dev_set; bxe_get_regs_t *reg_p; bxe_reg_rdw_t *reg_rdw_p; bxe_pcicfg_rdw_t *cfg_rdw_p; bxe_perm_mac_addr_t *mac_addr_p; if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) return ENXIO; pci_dev= sc->dev; dump = (bxe_grcdump_t *)data; switch(cmd) { case BXE_GRC_DUMP_SIZE: dump->pci_func = sc->pcie_func; dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); break; case BXE_GRC_DUMP: grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); - if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || - (dump->grcdump_size < grc_dump_size)) { + + if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) || + (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) { rval = EINVAL; break; } + dump->grcdump_dwords = grc_dump_size >> 2; + rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); + sc->grcdump_done = 0; - if((sc->trigger_grcdump) && (!sc->grcdump_done) && - (!sc->grcdump_started)) { - rval = bxe_grc_dump(sc); - } - - if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && - (sc->grc_dump != NULL)) { - dump->grcdump_dwords = grc_dump_size >> 2; - rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); - 
free(sc->grc_dump, M_DEVBUF); - sc->grc_dump = NULL; - sc->grcdump_started = 0; - sc->grcdump_done = 0; - } - break; case BXE_DRV_INFO: drv_infop = (bxe_drvinfo_t *)data; snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", BXE_DRIVER_VERSION); snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", sc->devinfo.bc_ver_str); snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH, "%s", sc->fw_ver_str); drv_infop->eeprom_dump_len = sc->devinfo.flash_size; drv_infop->reg_dump_len = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", sc->pcie_bus, sc->pcie_device, sc->pcie_func); break; - case BXE_DEV_SETTING: dev_p = (bxe_dev_setting_t *)data; bxe_get_settings(sc, &dev_set); dev_p->supported = dev_set.supported; dev_p->advertising = dev_set.advertising; dev_p->speed = dev_set.speed; dev_p->duplex = dev_set.duplex; dev_p->port = dev_set.port; dev_p->phy_address = dev_set.phy_address; dev_p->autoneg = dev_set.autoneg; break; case BXE_GET_REGS: reg_p = (bxe_get_regs_t *)data; grc_dump_size = reg_p->reg_buf_len; - if((!sc->grcdump_done) && (!sc->grcdump_started)) { + if (sc->grc_dump == NULL) { + rval = EINVAL; + break; + } + + if(!sc->grcdump_done) { bxe_grc_dump(sc); } - if((sc->grcdump_done) && (sc->grcdump_started) && - (sc->grc_dump != NULL)) { + if(sc->grcdump_done) { rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); - free(sc->grc_dump, M_DEVBUF); - sc->grc_dump = NULL; - sc->grcdump_started = 0; sc->grcdump_done = 0; } break; - case BXE_RDW_REG: reg_rdw_p = (bxe_reg_rdw_t *)data; if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); break; case BXE_RDW_PCICFG: cfg_rdw_p = (bxe_pcicfg_rdw_t *)data; if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_width); } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, cfg_rdw_p->cfg_width); } else { BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n"); } break; case BXE_MAC_ADDR: mac_addr_p = (bxe_perm_mac_addr_t *)data; snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", sc->mac_addr_str); break; case BXE_EEPROM: rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data); break; default: break; } return (rval); } Index: projects/release-pkg/sys/dev/bxe/bxe.h =================================================================== --- projects/release-pkg/sys/dev/bxe/bxe.h (revision 297926) +++ projects/release-pkg/sys/dev/bxe/bxe.h (revision 297927) @@ -1,2482 +1,2478 @@ /*- * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __BXE_H__ #define __BXE_H__ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "device_if.h" #include "bus_if.h" #include "pci_if.h" #if _BYTE_ORDER == _LITTLE_ENDIAN #ifndef LITTLE_ENDIAN #define LITTLE_ENDIAN #endif #ifndef __LITTLE_ENDIAN #define __LITTLE_ENDIAN #endif #undef BIG_ENDIAN #undef __BIG_ENDIAN #else /* _BIG_ENDIAN */ #ifndef BIG_ENDIAN #define BIG_ENDIAN #endif #ifndef __BIG_ENDIAN #define __BIG_ENDIAN #endif #undef LITTLE_ENDIAN #undef __LITTLE_ENDIAN #endif #include "ecore_mfw_req.h" #include "ecore_fw_defs.h" #include "ecore_hsi.h" #include "ecore_reg.h" #include "bxe_dcb.h" #include "bxe_stats.h" #include "bxe_elink.h" #define VF_MAC_CREDIT_CNT 0 #define VF_VLAN_CREDIT_CNT (0) #if __FreeBSD_version < 800054 #if defined(__i386__) || defined(__amd64__) #define mb() __asm volatile("mfence;" : : : "memory") #define wmb() __asm volatile("sfence;" : : : "memory") #define rmb() __asm volatile("lfence;" : : : "memory") static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define mb() #define rmb() #define wmb() #define prefetch(x) #endif #endif #if __FreeBSD_version >= 1000000 #define PCIR_EXPRESS_DEVICE_STA PCIER_DEVICE_STA #define PCIM_EXP_STA_TRANSACTION_PND PCIEM_STA_TRANSACTION_PND #define PCIR_EXPRESS_LINK_STA PCIER_LINK_STA #define PCIM_LINK_STA_WIDTH PCIEM_LINK_STA_WIDTH #define PCIM_LINK_STA_SPEED PCIEM_LINK_STA_SPEED #define PCIR_EXPRESS_DEVICE_CTL PCIER_DEVICE_CTL #define PCIM_EXP_CTL_MAX_PAYLOAD PCIEM_CTL_MAX_PAYLOAD #define PCIM_EXP_CTL_MAX_READ_REQUEST PCIEM_CTL_MAX_READ_REQUEST #endif #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif #ifndef ARRSIZE #define ARRSIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif #ifndef roundup #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #endif #ifndef ilog2 static inline int bxe_ilog2(int x) { int log = 0; while (x >>= 1) log++; return (log); } #define ilog2(x) bxe_ilog2(x) #endif #include "ecore_sp.h" #define BRCM_VENDORID 0x14e4 #define PCI_ANY_ID (uint16_t)(~0U) struct bxe_device_type { uint16_t bxe_vid; uint16_t bxe_did; uint16_t bxe_svid; uint16_t bxe_sdid; char *bxe_name; }; #define BCM_PAGE_SHIFT 12 #define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) #define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) #define 
BCM_PAGE_ALIGN(addr) ((addr + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) #if BCM_PAGE_SIZE != 4096 #error Page sizes other than 4KB are unsupported! #endif #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF) #define U64_LO(addr) ((uint32_t)(((uint64_t)(addr)) & 0xFFFFFFFF)) #define U64_HI(addr) ((uint32_t)(((uint64_t)(addr)) >> 32)) #else #define U64_LO(addr) ((uint32_t)(addr)) #define U64_HI(addr) (0) #endif #define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo)) #define SET_FLAG(value, mask, flag) \ do { \ (value) &= ~(mask); \ (value) |= ((flag) << (mask##_SHIFT)); \ } while (0) #define GET_FLAG(value, mask) \ (((value) & (mask)) >> (mask##_SHIFT)) #define GET_FIELD(value, fname) \ (((value) & (fname##_MASK)) >> (fname##_SHIFT)) #define BXE_MAX_SEGMENTS 12 /* 13-1 for parsing buffer */ #define BXE_TSO_MAX_SEGMENTS 32 #define BXE_TSO_MAX_SIZE (65535 + sizeof(struct ether_vlan_header)) #define BXE_TSO_MAX_SEG_SIZE 4096 /* dropless fc FW/HW related params */ #define BRB_SIZE(sc) (CHIP_IS_E3(sc) ? 1024 : 512) #define MAX_AGG_QS(sc) (CHIP_IS_E1(sc) ? \ ETH_MAX_AGGREGATION_QUEUES_E1 : \ ETH_MAX_AGGREGATION_QUEUES_E1H_E2) #define FW_DROP_LEVEL(sc) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(sc)) #define FW_PREFETCH_CNT 16 #define DROPLESS_FC_HEADROOM 100 /******************/ /* RX SGE defines */ /******************/ #define RX_SGE_NUM_PAGES 2 /* must be a power of 2 */ #define RX_SGE_TOTAL_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) #define RX_SGE_NEXT_PAGE_DESC_CNT 2 #define RX_SGE_USABLE_PER_PAGE (RX_SGE_TOTAL_PER_PAGE - RX_SGE_NEXT_PAGE_DESC_CNT) #define RX_SGE_PER_PAGE_MASK (RX_SGE_TOTAL_PER_PAGE - 1) #define RX_SGE_TOTAL (RX_SGE_TOTAL_PER_PAGE * RX_SGE_NUM_PAGES) #define RX_SGE_USABLE (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES) #define RX_SGE_MAX (RX_SGE_TOTAL - 1) #define RX_SGE(x) ((x) & RX_SGE_MAX) #define RX_SGE_NEXT(x) \ ((((x) & RX_SGE_PER_PAGE_MASK) == (RX_SGE_USABLE_PER_PAGE - 1)) \ ? (x) + 1 + RX_SGE_NEXT_PAGE_DESC_CNT : (x) + 1) #define RX_SGE_MASK_ELEM_SZ 64 #define RX_SGE_MASK_ELEM_SHIFT 6 #define RX_SGE_MASK_ELEM_MASK ((uint64_t)RX_SGE_MASK_ELEM_SZ - 1) /* * Creates a bitmask of all ones in the less significant bits. * idx - index of the most significant bit in the created mask. */ #define RX_SGE_ONES_MASK(idx) \ (((uint64_t)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1) #define RX_SGE_MASK_ELEM_ONE_MASK ((uint64_t)(~0)) /* Number of uint64_t elements in SGE mask array. */ #define RX_SGE_MASK_LEN \ ((RX_SGE_NUM_PAGES * RX_SGE_TOTAL_PER_PAGE) / RX_SGE_MASK_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) #define RX_SGE_NEXT_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) /* * dropless fc calculations for SGEs * Number of required SGEs is the sum of two: * 1. Number of possible opened aggregations (next packet for * these aggregations will probably consume an SGE immediately) * 2.
Rest of BRB blocks divided by 2 (block will consume new SGE only * after placement on BD for new TPA aggregation) * Takes into account RX_SGE_NEXT_PAGE_DESC_CNT "next" elements on each page */ #define NUM_SGE_REQ(sc) \ (MAX_AGG_QS(sc) + (BRB_SIZE(sc) - MAX_AGG_QS(sc)) / 2) #define NUM_SGE_PG_REQ(sc) \ ((NUM_SGE_REQ(sc) + RX_SGE_USABLE_PER_PAGE - 1) / RX_SGE_USABLE_PER_PAGE) #define SGE_TH_LO(sc) \ (NUM_SGE_REQ(sc) + NUM_SGE_PG_REQ(sc) * RX_SGE_NEXT_PAGE_DESC_CNT) #define SGE_TH_HI(sc) \ (SGE_TH_LO(sc) + DROPLESS_FC_HEADROOM) #define PAGES_PER_SGE_SHIFT 0 #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) #define SGE_PAGE_SIZE BCM_PAGE_SIZE #define SGE_PAGE_SHIFT BCM_PAGE_SHIFT #define SGE_PAGE_ALIGN(addr) BCM_PAGE_ALIGN(addr) #define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) #define TPA_AGG_SIZE min((8 * SGE_PAGES), 0xffff) /*****************/ /* TX BD defines */ /*****************/ #define TX_BD_NUM_PAGES 16 /* must be a power of 2 */ #define TX_BD_TOTAL_PER_PAGE (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) #define TX_BD_USABLE_PER_PAGE (TX_BD_TOTAL_PER_PAGE - 1) #define TX_BD_TOTAL (TX_BD_TOTAL_PER_PAGE * TX_BD_NUM_PAGES) #define TX_BD_USABLE (TX_BD_USABLE_PER_PAGE * TX_BD_NUM_PAGES) #define TX_BD_MAX (TX_BD_TOTAL - 1) #define TX_BD_NEXT(x) \ ((((x) & TX_BD_USABLE_PER_PAGE) == (TX_BD_USABLE_PER_PAGE - 1)) ? \ ((x) + 2) : ((x) + 1)) #define TX_BD(x) ((x) & TX_BD_MAX) #define TX_BD_PAGE(x) (((x) & ~TX_BD_USABLE_PER_PAGE) >> 8) #define TX_BD_IDX(x) ((x) & TX_BD_USABLE_PER_PAGE) /* * Trigger pending transmits when the number of available BDs is greater * than 1/8 of the total number of usable BDs. */ #define BXE_TX_CLEANUP_THRESHOLD (TX_BD_USABLE / 8) #define BXE_TX_TIMEOUT 5 /*****************/ /* RX BD defines */ /*****************/ #define RX_BD_NUM_PAGES 8 /* power of 2 */ #define RX_BD_TOTAL_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) #define RX_BD_NEXT_PAGE_DESC_CNT 2 #define RX_BD_USABLE_PER_PAGE (RX_BD_TOTAL_PER_PAGE - RX_BD_NEXT_PAGE_DESC_CNT) #define RX_BD_PER_PAGE_MASK (RX_BD_TOTAL_PER_PAGE - 1) #define RX_BD_TOTAL (RX_BD_TOTAL_PER_PAGE * RX_BD_NUM_PAGES) #define RX_BD_USABLE (RX_BD_USABLE_PER_PAGE * RX_BD_NUM_PAGES) #define RX_BD_MAX (RX_BD_TOTAL - 1) #define RX_BD_NEXT(x) \ ((((x) & RX_BD_PER_PAGE_MASK) == (RX_BD_USABLE_PER_PAGE - 1)) ? \ ((x) + 3) : ((x) + 1)) #define RX_BD(x) ((x) & RX_BD_MAX) #define RX_BD_PAGE(x) (((x) & ~RX_BD_PER_PAGE_MASK) >> 9) #define RX_BD_IDX(x) ((x) & RX_BD_PER_PAGE_MASK) /* * dropless fc calculations for BDs * Number of BDs should be as number of buffers in BRB: * Low threshold takes into account RX_BD_NEXT_PAGE_DESC_CNT * "next" elements on each page */ #define NUM_BD_REQ(sc) \ BRB_SIZE(sc) #define NUM_BD_PG_REQ(sc) \ ((NUM_BD_REQ(sc) + RX_BD_USABLE_PER_PAGE - 1) / RX_BD_USABLE_PER_PAGE) #define BD_TH_LO(sc) \ (NUM_BD_REQ(sc) + \ NUM_BD_PG_REQ(sc) * RX_BD_NEXT_PAGE_DESC_CNT + \ FW_DROP_LEVEL(sc)) #define BD_TH_HI(sc) \ (BD_TH_LO(sc) + DROPLESS_FC_HEADROOM) #define MIN_RX_AVAIL(sc) \ ((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128) #define MIN_RX_SIZE_TPA_HW(sc) \ (CHIP_IS_E1(sc) ? 
ETH_MIN_RX_CQES_WITH_TPA_E1 : \ ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) #define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA #define MIN_RX_SIZE_TPA(sc) \ (max(MIN_RX_SIZE_TPA_HW(sc), MIN_RX_AVAIL(sc))) #define MIN_RX_SIZE_NONTPA(sc) \ (max(MIN_RX_SIZE_NONTPA_HW, MIN_RX_AVAIL(sc))) /***************/ /* RCQ defines */ /***************/ /* * As long as a CQE is X times bigger than a BD entry we have to allocate X times * more pages for the CQ ring in order to keep it balanced with the BD ring */ #define CQE_BD_REL (sizeof(union eth_rx_cqe) / \ sizeof(struct eth_rx_bd)) #define RCQ_NUM_PAGES (RX_BD_NUM_PAGES * CQE_BD_REL) /* power of 2 */ #define RCQ_TOTAL_PER_PAGE (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) #define RCQ_NEXT_PAGE_DESC_CNT 1 #define RCQ_USABLE_PER_PAGE (RCQ_TOTAL_PER_PAGE - RCQ_NEXT_PAGE_DESC_CNT) #define RCQ_TOTAL (RCQ_TOTAL_PER_PAGE * RCQ_NUM_PAGES) #define RCQ_USABLE (RCQ_USABLE_PER_PAGE * RCQ_NUM_PAGES) #define RCQ_MAX (RCQ_TOTAL - 1) #define RCQ_NEXT(x) \ ((((x) & RCQ_USABLE_PER_PAGE) == (RCQ_USABLE_PER_PAGE - 1)) ? \ ((x) + 1 + RCQ_NEXT_PAGE_DESC_CNT) : ((x) + 1)) #define RCQ(x) ((x) & RCQ_MAX) #define RCQ_PAGE(x) (((x) & ~RCQ_USABLE_PER_PAGE) >> 7) #define RCQ_IDX(x) ((x) & RCQ_USABLE_PER_PAGE) /* * dropless fc calculations for RCQs * Number of RCQs should be as number of buffers in BRB: * Low threshold takes into account RCQ_NEXT_PAGE_DESC_CNT * "next" elements on each page */ #define NUM_RCQ_REQ(sc) \ BRB_SIZE(sc) #define NUM_RCQ_PG_REQ(sc) \ ((NUM_RCQ_REQ(sc) + RCQ_USABLE_PER_PAGE - 1) / RCQ_USABLE_PER_PAGE) #define RCQ_TH_LO(sc) \ (NUM_RCQ_REQ(sc) + \ NUM_RCQ_PG_REQ(sc) * RCQ_NEXT_PAGE_DESC_CNT + \ FW_DROP_LEVEL(sc)) #define RCQ_TH_HI(sc) \ (RCQ_TH_LO(sc) + DROPLESS_FC_HEADROOM) /* This is needed for determining last_max */ #define SUB_S16(a, b) (int16_t)((int16_t)(a) - (int16_t)(b)) #define __SGE_MASK_SET_BIT(el, bit) \ do { \ (el) = ((el) | ((uint64_t)0x1 << (bit))); \ } while (0) #define __SGE_MASK_CLEAR_BIT(el, bit) \ do { \ (el) = ((el) & (~((uint64_t)0x1 << (bit)))); \ } while (0) #define SGE_MASK_SET_BIT(fp, idx) \ __SGE_MASK_SET_BIT((fp)->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ ((idx) & RX_SGE_MASK_ELEM_MASK)) #define SGE_MASK_CLEAR_BIT(fp, idx) \ __SGE_MASK_CLEAR_BIT((fp)->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \ ((idx) & RX_SGE_MASK_ELEM_MASK)) /* Load / Unload modes */ #define LOAD_NORMAL 0 #define LOAD_OPEN 1 #define LOAD_DIAG 2 #define LOAD_LOOPBACK_EXT 3 #define UNLOAD_NORMAL 0 #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 /* Some constants... */ //#define MAX_PATH_NUM 2 //#define E2_MAX_NUM_OF_VFS 64 //#define E1H_FUNC_MAX 8 //#define E2_FUNC_MAX 4 /* per path */ #define MAX_VNIC_NUM 4 #define MAX_FUNC_NUM 8 /* common to all chips */ //#define MAX_NDSB HC_SB_MAX_SB_E2 /* max non-default status block */ #define MAX_RSS_CHAINS 16 /* a constant for HW limit */ #define MAX_MSI_VECTOR 8 /* a constant for HW limit */ #define ILT_NUM_PAGE_ENTRIES 3072 /* * On 57710/11 we use the whole table since we have 8 functions. * On 57712 we have only 4 functions, but use the same size per func, so only half * of the table is used.
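 * For example: with ILT_NUM_PAGE_ENTRIES at 3072, ILT_PER_FUNC below
 * works out to 3072 / 8 = 384 lines and FUNC_ILT_BASE(func) to
 * func * 384, so function 2 owns ILT lines 768 through 1151 (the
 * range that bxe_clear_func_ilt() above walks).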
*/ #define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES / 8) #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) /* * the phys address is shifted right 12 bits and has an added * 1=valid bit added to the 53rd bit * then since this is a wide register(TM) * we split it into two 32 bit writes */ #define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)x >> 12) & 0xFFFFFFFF)) #define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)x >> 44))) /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ #define ETH_HLEN 14 #define ETH_OVERHEAD (ETH_HLEN + 8 + 8) #define ETH_MIN_PACKET_SIZE 60 #define ETH_MAX_PACKET_SIZE ETHERMTU /* 1500 */ #define ETH_MAX_JUMBO_PACKET_SIZE 9600 /* TCP with Timestamp Option (32) + IPv6 (40) */ #define ETH_MAX_TPA_HEADER_SIZE 72 /* max supported alignment is 256 (8 shift) */ //#define BXE_RX_ALIGN_SHIFT ((CACHE_LINE_SHIFT < 8) ? CACHE_LINE_SHIFT : 8) #define BXE_RX_ALIGN_SHIFT 8 /* FW uses 2 cache lines alignment for start packet and size */ #define BXE_FW_RX_ALIGN_START (1 << BXE_RX_ALIGN_SHIFT) #define BXE_FW_RX_ALIGN_END (1 << BXE_RX_ALIGN_SHIFT) #define BXE_PXP_DRAM_ALIGN (BXE_RX_ALIGN_SHIFT - 5) /* XXX ??? */ struct bxe_bar { struct resource *resource; int rid; bus_space_tag_t tag; bus_space_handle_t handle; vm_offset_t kva; }; struct bxe_intr { struct resource *resource; int rid; void *tag; }; /* Used to manage DMA allocations. */ struct bxe_dma { struct bxe_softc *sc; bus_addr_t paddr; void *vaddr; bus_dma_tag_t tag; bus_dmamap_t map; bus_dma_segment_t seg; bus_size_t size; int nseg; char msg[32]; }; /* attn group wiring */ #define MAX_DYNAMIC_ATTN_GRPS 8 struct attn_route { uint32_t sig[5]; }; struct iro { uint32_t base; uint16_t m1; uint16_t m2; uint16_t m3; uint16_t size; }; union bxe_host_hc_status_block { /* pointer to fp status block e2 */ struct host_hc_status_block_e2 *e2_sb; /* pointer to fp status block e1x */ struct host_hc_status_block_e1x *e1x_sb; }; union bxe_db_prod { struct doorbell_set_prod data; uint32_t raw; }; struct bxe_sw_tx_bd { struct mbuf *m; bus_dmamap_t m_map; uint16_t first_bd; uint8_t flags; /* set on the first BD descriptor when there is a split BD */ #define BXE_TSO_SPLIT_BD (1 << 0) }; struct bxe_sw_rx_bd { struct mbuf *m; bus_dmamap_t m_map; }; struct bxe_sw_tpa_info { struct bxe_sw_rx_bd bd; bus_dma_segment_t seg; uint8_t state; #define BXE_TPA_STATE_START 1 #define BXE_TPA_STATE_STOP 2 uint8_t placement_offset; uint16_t parsing_flags; uint16_t vlan_tag; uint16_t len_on_bd; }; /* * This is the HSI fastpath data structure. There can be up to MAX_RSS_CHAIN * instances of the fastpath structure when using multiple queues. 
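 * Each instance carries its own status block, TX/RX/RCQ/SGE rings,
 * mbuf maps, taskqueue and TX/RX mutexes, so each queue can be
 * serviced from its own interrupt context without sharing state.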
*/ struct bxe_fastpath { /* pointer back to parent structure */ struct bxe_softc *sc; struct mtx tx_mtx; char tx_mtx_name[32]; struct mtx rx_mtx; char rx_mtx_name[32]; #define BXE_FP_TX_LOCK(fp) mtx_lock(&fp->tx_mtx) #define BXE_FP_TX_UNLOCK(fp) mtx_unlock(&fp->tx_mtx) #define BXE_FP_TX_LOCK_ASSERT(fp) mtx_assert(&fp->tx_mtx, MA_OWNED) #define BXE_FP_TX_TRYLOCK(fp) mtx_trylock(&fp->tx_mtx) #define BXE_FP_RX_LOCK(fp) mtx_lock(&fp->rx_mtx) #define BXE_FP_RX_UNLOCK(fp) mtx_unlock(&fp->rx_mtx) #define BXE_FP_RX_LOCK_ASSERT(fp) mtx_assert(&fp->rx_mtx, MA_OWNED) /* status block */ struct bxe_dma sb_dma; union bxe_host_hc_status_block status_block; /* transmit chain (tx bds) */ struct bxe_dma tx_dma; union eth_tx_bd_types *tx_chain; /* receive chain (rx bds) */ struct bxe_dma rx_dma; struct eth_rx_bd *rx_chain; /* receive completion queue chain (rcq bds) */ struct bxe_dma rcq_dma; union eth_rx_cqe *rcq_chain; /* receive scatter/gather entry chain (for TPA) */ struct bxe_dma rx_sge_dma; struct eth_rx_sge *rx_sge_chain; /* tx mbufs */ bus_dma_tag_t tx_mbuf_tag; struct bxe_sw_tx_bd tx_mbuf_chain[TX_BD_TOTAL]; /* rx mbufs */ bus_dma_tag_t rx_mbuf_tag; struct bxe_sw_rx_bd rx_mbuf_chain[RX_BD_TOTAL]; bus_dmamap_t rx_mbuf_spare_map; /* rx sge mbufs */ bus_dma_tag_t rx_sge_mbuf_tag; struct bxe_sw_rx_bd rx_sge_mbuf_chain[RX_SGE_TOTAL]; bus_dmamap_t rx_sge_mbuf_spare_map; /* rx tpa mbufs (use the larger size for TPA queue length) */ int tpa_enable; /* disabled per fastpath upon error */ struct bxe_sw_tpa_info rx_tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; bus_dmamap_t rx_tpa_info_mbuf_spare_map; uint64_t rx_tpa_queue_used; uint16_t *sb_index_values; uint16_t *sb_running_index; uint32_t ustorm_rx_prods_offset; uint8_t igu_sb_id; /* status block number in HW */ uint8_t fw_sb_id; /* status block number in FW */ uint32_t rx_buf_size; int mbuf_alloc_size; int state; #define BXE_FP_STATE_CLOSED 0x01 #define BXE_FP_STATE_IRQ 0x02 #define BXE_FP_STATE_OPENING 0x04 #define BXE_FP_STATE_OPEN 0x08 #define BXE_FP_STATE_HALTING 0x10 #define BXE_FP_STATE_HALTED 0x20 /* reference back to this fastpath queue number */ uint8_t index; /* this is also the 'cid' */ #define FP_IDX(fp) (fp->index) /* interrupt taskqueue (fast) */ struct task tq_task; struct taskqueue *tq; char tq_name[32]; /* ethernet client ID (each fastpath set of RX/TX/CQE is a client) */ uint8_t cl_id; #define FP_CL_ID(fp) (fp->cl_id) uint8_t cl_qzone_id; uint16_t fp_hc_idx; /* driver copy of the receive buffer descriptor prod/cons indices */ uint16_t rx_bd_prod; uint16_t rx_bd_cons; /* driver copy of the receive completion queue prod/cons indices */ uint16_t rx_cq_prod; uint16_t rx_cq_cons; union bxe_db_prod tx_db; /* Transmit packet producer index (used in eth_tx_bd). */ uint16_t tx_pkt_prod; uint16_t tx_pkt_cons; /* Transmit buffer descriptor producer index. */ uint16_t tx_bd_prod; uint16_t tx_bd_cons; uint64_t sge_mask[RX_SGE_MASK_LEN]; uint16_t rx_sge_prod; struct tstorm_per_queue_stats old_tclient; struct ustorm_per_queue_stats old_uclient; struct xstorm_per_queue_stats old_xclient; struct bxe_eth_q_stats eth_q_stats; struct bxe_eth_q_stats_old eth_q_stats_old; /* Pointer to the receive consumer in the status block */ uint16_t *rx_cq_cons_sb; /* Pointer to the transmit consumer in the status block */ uint16_t *tx_cons_sb; /* transmit timeout until chip reset */ int watchdog_timer; /* Free/used buffer descriptor counters. 
*/ //uint16_t used_tx_bd; /* Last maximal completed SGE */ uint16_t last_max_sge; //uint16_t rx_sge_free_idx; //uint8_t segs; #if __FreeBSD_version >= 800000 #define BXE_BR_SIZE 4096 struct buf_ring *tx_br; #endif }; /* struct bxe_fastpath */ /* sriov XXX */ #define BXE_MAX_NUM_OF_VFS 64 #define BXE_VF_CID_WND 0 #define BXE_CIDS_PER_VF (1 << BXE_VF_CID_WND) #define BXE_CLIENTS_PER_VF 1 #define BXE_FIRST_VF_CID 256 #define BXE_VF_CIDS (BXE_MAX_NUM_OF_VFS * BXE_CIDS_PER_VF) #define BXE_VF_ID_INVALID 0xFF #define IS_SRIOV(sc) 0 #define GET_NUM_VFS_PER_PATH(sc) 0 #define GET_NUM_VFS_PER_PF(sc) 0 /* maximum number of fast-path interrupt contexts */ #define FP_SB_MAX_E1x 16 #define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 union cdu_context { struct eth_context eth; char pad[1024]; }; /* CDU host DB constants */ #define CDU_ILT_PAGE_SZ_HW 2 #define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ #define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) #define CNIC_ISCSI_CID_MAX 256 #define CNIC_FCOE_CID_MAX 2048 #define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) #define QM_ILT_PAGE_SZ_HW 0 #define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ #define QM_CID_ROUND 1024 /* TM (timers) host DB constants */ #define TM_ILT_PAGE_SZ_HW 0 #define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ /*#define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ #define TM_CONN_NUM 1024 #define TM_ILT_SZ (8 * TM_CONN_NUM) #define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) /* SRC (Searcher) host DB constants */ #define SRC_ILT_PAGE_SZ_HW 0 #define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ #define SRC_HASH_BITS 10 #define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ #define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) #define SRC_T2_SZ SRC_ILT_SZ #define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) struct hw_context { struct bxe_dma vcxt_dma; union cdu_context *vcxt; //bus_addr_t cxt_mapping; size_t size; }; #define SM_RX_ID 0 #define SM_TX_ID 1 /* defines for multiple tx priority indices */ #define FIRST_TX_ONLY_COS_INDEX 1 #define FIRST_TX_COS_INDEX 0 #define CID_TO_FP(cid, sc) ((cid) % BXE_NUM_NON_CNIC_QUEUES(sc)) #define HC_INDEX_ETH_RX_CQ_CONS 1 #define HC_INDEX_OOO_TX_CQ_CONS 4 #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 /* congestion management fairness mode */ #define CMNG_FNS_NONE 0 #define CMNG_FNS_MINMAX 1 /* CMNG constants, as derived from system spec calculations */ /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ #define DEF_MIN_RATE 100 /* resolution of the rate shaping timer - 400 usec */ #define RS_PERIODIC_TIMEOUT_USEC 400 /* number of bytes in single QM arbitration cycle - * coefficient for calculating the fairness timer */ #define QM_ARB_BYTES 160000 /* resolution of Min algorithm 1:100 */ #define MIN_RES 100 /* how many bytes above threshold for the minimal credit of Min algorithm*/ #define MIN_ABOVE_THRESH 32768 /* fairness algorithm integration time coefficient - * for calculating the actual Tfair */ #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) /* memory of fairness algorithm - 2 cycles */ #define FAIR_MEM 2 #define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */ #define HC_SEG_ACCESS_ATTN 4 #define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */ /* * The total number of L2 
queues, MSIX vectors and HW contexts (CIDs) is * controlled by the number of fast-path status blocks supported by the * device (HW/FW). Each fast-path status block (FP-SB) aka non-default * status block represents an independent interrupt context that can * serve a regular L2 networking queue. However, special L2 queues such * as the FCoE queue do not require an FP-SB and other components like * the CNIC may consume an FP-SB, reducing the number of possible L2 queues * * If the maximum number of FP-SB available is X then: * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of * regular L2 queues is Y = X-1 * b. In MF mode the actual number of L2 queues is Y = (X-1)/MF_factor * c. If the FCoE L2 queue is supported the actual number of L2 queues * is Y+1 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for * slow-path interrupts) or Y+2 if CNIC is supported (one additional * FP interrupt context for the CNIC). * e. The number of HW contexts (CID count) is always X or X+1 if the FCoE * L2 queue is supported. The CID for the FCoE L2 queue is always X. * * So this is quite simple for now as no ULPs are supported yet. :-) */ #define BXE_NUM_QUEUES(sc) ((sc)->num_queues) #define BXE_NUM_ETH_QUEUES(sc) BXE_NUM_QUEUES(sc) #define BXE_NUM_NON_CNIC_QUEUES(sc) BXE_NUM_QUEUES(sc) #define BXE_NUM_RX_QUEUES(sc) BXE_NUM_QUEUES(sc) #define FOR_EACH_QUEUE(sc, var) \ for ((var) = 0; (var) < BXE_NUM_QUEUES(sc); (var)++) #define FOR_EACH_NONDEFAULT_QUEUE(sc, var) \ for ((var) = 1; (var) < BXE_NUM_QUEUES(sc); (var)++) #define FOR_EACH_ETH_QUEUE(sc, var) \ for ((var) = 0; (var) < BXE_NUM_ETH_QUEUES(sc); (var)++) #define FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, var) \ for ((var) = 1; (var) < BXE_NUM_ETH_QUEUES(sc); (var)++) #define FOR_EACH_COS_IN_TX_QUEUE(sc, var) \ for ((var) = 0; (var) < (sc)->max_cos; (var)++) #define FOR_EACH_CNIC_QUEUE(sc, var) \ for ((var) = BXE_NUM_ETH_QUEUES(sc); \ (var) < BXE_NUM_QUEUES(sc); \ (var)++) enum { OOO_IDX_OFFSET, FCOE_IDX_OFFSET, FWD_IDX_OFFSET, }; #define FCOE_IDX(sc) (BXE_NUM_NON_CNIC_QUEUES(sc) + FCOE_IDX_OFFSET) #define bxe_fcoe_fp(sc) (&sc->fp[FCOE_IDX(sc)]) #define bxe_fcoe(sc, var) (bxe_fcoe_fp(sc)->var) #define bxe_fcoe_inner_sp_obj(sc) (&sc->sp_objs[FCOE_IDX(sc)]) #define bxe_fcoe_sp_obj(sc, var) (bxe_fcoe_inner_sp_obj(sc)->var) #define bxe_fcoe_tx(sc, var) (bxe_fcoe_fp(sc)->txdata_ptr[FIRST_TX_COS_INDEX]->var) #define OOO_IDX(sc) (BXE_NUM_NON_CNIC_QUEUES(sc) + OOO_IDX_OFFSET) #define bxe_ooo_fp(sc) (&sc->fp[OOO_IDX(sc)]) #define bxe_ooo(sc, var) (bxe_ooo_fp(sc)->var) #define bxe_ooo_inner_sp_obj(sc) (&sc->sp_objs[OOO_IDX(sc)]) #define bxe_ooo_sp_obj(sc, var) (bxe_ooo_inner_sp_obj(sc)->var) #define FWD_IDX(sc) (BXE_NUM_NON_CNIC_QUEUES(sc) + FWD_IDX_OFFSET) #define bxe_fwd_fp(sc) (&sc->fp[FWD_IDX(sc)]) #define bxe_fwd(sc, var) (bxe_fwd_fp(sc)->var) #define bxe_fwd_inner_sp_obj(sc) (&sc->sp_objs[FWD_IDX(sc)]) #define bxe_fwd_sp_obj(sc, var) (bxe_fwd_inner_sp_obj(sc)->var) #define bxe_fwd_txdata(fp) (fp->txdata_ptr[FIRST_TX_COS_INDEX]) #define IS_ETH_FP(fp) ((fp)->index < BXE_NUM_ETH_QUEUES((fp)->sc)) #define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->sc)) #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(sc)) #define IS_FWD_FP(fp) ((fp)->index == FWD_IDX((fp)->sc)) #define IS_FWD_IDX(idx) ((idx) == FWD_IDX(sc)) #define IS_OOO_FP(fp) ((fp)->index == OOO_IDX((fp)->sc)) #define IS_OOO_IDX(idx) ((idx) == OOO_IDX(sc)) enum { BXE_PORT_QUERY_IDX, BXE_PF_QUERY_IDX, BXE_FCOE_QUERY_IDX, BXE_FIRST_QUEUE_QUERY_IDX, }; struct bxe_fw_stats_req { struct stats_query_header hdr;
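/* the query array below provides three fixed slots (port, PF and FCoE;
 * see the BXE_*_QUERY_IDX enum above) plus up to FP_SB_MAX_E1x
 * per-ETH-queue slots */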
struct stats_query_entry query[FP_SB_MAX_E1x + BXE_FIRST_QUEUE_QUERY_IDX]; }; struct bxe_fw_stats_data { struct stats_counter storm_counters; struct per_port_stats port; struct per_pf_stats pf; //struct fcoe_statistics_params fcoe; struct per_queue_stats queue_stats[1]; }; /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ #define BXE_IGU_STAS_MSG_VF_CNT 64 #define BXE_IGU_STAS_MSG_PF_CNT 4 #define MAX_DMAE_C 8 /* * For the main interface up/down code paths, a not-so-fine-grained CORE * mutex lock is used. Inside this code are various calls to kernel routines * that can cause a sleep to occur. Namely memory allocations and taskqueue * handling. If using an MTX lock we are *not* allowed to sleep but we can * with an SX lock. This define forces the CORE lock to use an SX lock. * Undefine this and an MTX lock will be used instead. Note that the IOCTL * path can cause problems since it's called by a non-sleepable thread. To * alleviate a potential sleep, for any IOCTL processing that results in the * chip/interface being started/stopped/reinitialized, the actual work is * offloaded to a taskqueue. */ #define BXE_CORE_LOCK_SX /* * This is the slowpath data structure. It is mapped into non-paged memory * so that the hardware can access its contents directly and must be page * aligned. */ struct bxe_slowpath { /* used by the DMAE command executor */ struct dmae_cmd dmae[MAX_DMAE_C]; /* statistics completion */ uint32_t stats_comp; /* firmware defined statistics blocks */ union mac_stats mac_stats; struct nig_stats nig_stats; struct host_port_stats port_stats; struct host_func_stats func_stats; //struct host_func_stats func_stats_base; /* DMAE completion value and data source/sink */ uint32_t wb_comp; uint32_t wb_data[4]; union { struct mac_configuration_cmd e1x; struct eth_classify_rules_ramrod_data e2; } mac_rdata; union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; } rx_mode_rdata; struct eth_rss_update_ramrod_data rss_rdata; union { struct mac_configuration_cmd e1; struct eth_multicast_rules_ramrod_data e2; } mcast_rdata; union { struct function_start_data func_start; struct flow_control_configuration pfc_config; /* for DCBX ramrod */ } func_rdata; /* Queue State related ramrods */ union { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; } q_rdata; /* * AFEX ramrod cannot be a part of func_rdata union because these * events might arrive in parallel to other events from func_rdata. * If they were defined in the same union the data can get corrupted. */ struct afex_vif_list_ramrod_data func_afex_rdata; union drv_info_to_mcp drv_info_to_mcp; }; /* struct bxe_slowpath */ /* * Port specific data structure. */ struct bxe_port { /* * Port Management Function (for 57711E only). * When this field is set the driver instance is * responsible for managing port specific * configurations such as handling link attentions. */ uint32_t pmf; /* Ethernet maximum transmission unit. */ uint16_t ether_mtu; uint32_t link_config[ELINK_LINK_CONFIG_SIZE]; uint32_t ext_phy_config; /* Port feature config.*/ uint32_t config; /* Defines the features supported by the PHY. */ uint32_t supported[ELINK_LINK_CONFIG_SIZE]; /* Defines the features advertised by the PHY.
    uint32_t advertising[ELINK_LINK_CONFIG_SIZE];
#define ADVERTISED_10baseT_Half    (1 << 1)
#define ADVERTISED_10baseT_Full    (1 << 2)
#define ADVERTISED_100baseT_Half   (1 << 3)
#define ADVERTISED_100baseT_Full   (1 << 4)
#define ADVERTISED_1000baseT_Half  (1 << 5)
#define ADVERTISED_1000baseT_Full  (1 << 6)
#define ADVERTISED_TP              (1 << 7)
#define ADVERTISED_FIBRE           (1 << 8)
#define ADVERTISED_Autoneg         (1 << 9)
#define ADVERTISED_Asym_Pause      (1 << 10)
#define ADVERTISED_Pause           (1 << 11)
#define ADVERTISED_2500baseX_Full  (1 << 15)
#define ADVERTISED_10000baseT_Full (1 << 16)

    uint32_t phy_addr;

    /* Used to synchronize phy accesses. */
    struct mtx phy_mtx;
    char phy_mtx_name[32];

#define BXE_PHY_LOCK(sc)        mtx_lock(&sc->port.phy_mtx)
#define BXE_PHY_UNLOCK(sc)      mtx_unlock(&sc->port.phy_mtx)
#define BXE_PHY_LOCK_ASSERT(sc) mtx_assert(&sc->port.phy_mtx, MA_OWNED)

    /*
     * MCP scratchpad address for port specific statistics.
     * The device is responsible for writing statistics
     * back to the MCP for use with management firmware such
     * as UMP/NC-SI.
     */
    uint32_t port_stx;

    struct nig_stats old_nig_stats;
}; /* struct bxe_port */

struct bxe_mf_info {
    uint32_t mf_config[E1HVN_MAX];

    uint32_t vnics_per_port;   /* 1, 2 or 4 */
    uint32_t multi_vnics_mode; /* can be set even if vnics_per_port = 1 */
    uint32_t path_has_ovlan;   /* MF mode in the path (can be different
                                * than the MF mode of the function) */

#define IS_MULTI_VNIC(sc)  ((sc)->devinfo.mf_info.multi_vnics_mode)
#define VNICS_PER_PORT(sc) ((sc)->devinfo.mf_info.vnics_per_port)
#define VNICS_PER_PATH(sc)                                 \
    ((sc)->devinfo.mf_info.vnics_per_port *                \
     ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 1 ))

    uint8_t min_bw[MAX_VNIC_NUM];
    uint8_t max_bw[MAX_VNIC_NUM];

    uint16_t ext_id; /* vnic outer vlan or VIF ID */
#define VALID_OVLAN(ovlan) ((ovlan) <= 4096)
#define INVALID_VIF_ID     0xFFFF
#define OVLAN(sc)          ((sc)->devinfo.mf_info.ext_id)
#define VIF_ID(sc)         ((sc)->devinfo.mf_info.ext_id)

    uint16_t default_vlan;
#define NIV_DEFAULT_VLAN(sc) ((sc)->devinfo.mf_info.default_vlan)

    uint8_t niv_allowed_priorities;
#define NIV_ALLOWED_PRIORITIES(sc) ((sc)->devinfo.mf_info.niv_allowed_priorities)

    uint8_t niv_default_cos;
#define NIV_DEFAULT_COS(sc) ((sc)->devinfo.mf_info.niv_default_cos)

    uint8_t niv_mba_enabled;

    enum mf_cfg_afex_vlan_mode afex_vlan_mode;
#define AFEX_VLAN_MODE(sc) ((sc)->devinfo.mf_info.afex_vlan_mode)

    int afex_def_vlan_tag;
    uint32_t pending_max;

    uint16_t flags;
#define MF_INFO_VALID_MAC 0x0001

    uint8_t mf_mode; /* Switch-Dependent or Switch-Independent */
#define IS_MF(sc)                              \
    (IS_MULTI_VNIC(sc) &&                      \
     ((sc)->devinfo.mf_info.mf_mode != 0))
#define IS_MF_SD(sc)                                       \
    (IS_MULTI_VNIC(sc) &&                                  \
     ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD))
#define IS_MF_SI(sc)                                       \
    (IS_MULTI_VNIC(sc) &&                                  \
     ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI))
#define IS_MF_AFEX(sc)                                       \
    (IS_MULTI_VNIC(sc) &&                                    \
     ((sc)->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX))
#define IS_MF_SD_MODE(sc)   IS_MF_SD(sc)
#define IS_MF_SI_MODE(sc)   IS_MF_SI(sc)
#define IS_MF_AFEX_MODE(sc) IS_MF_AFEX(sc)

    uint32_t mf_protos_supported;
#define MF_PROTO_SUPPORT_ETHERNET 0x1
#define MF_PROTO_SUPPORT_ISCSI    0x2
#define MF_PROTO_SUPPORT_FCOE     0x4
}; /* struct bxe_mf_info */

/* Device information data structure.
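 *
 * Worked illustration (hypothetical chip_id value, not from the source):
 * using the CHIP_* field layout documented below, chip_id 0x164e1034
 * decodes as chip number 0x164e (57710), revision 0x1 (Bx),
 * metal 0x03 and bond id 0x4, since
 *
 *     CHIP_NUM(sc)     == chip_id >> 16        == 0x164e
 *     CHIP_REV(sc)     == chip_id & 0x0000f000 == 0x1000
 *     CHIP_METAL(sc)   == chip_id & 0x00000ff0 == 0x030
 *     CHIP_BOND_ID(sc) == chip_id & 0x0000000f == 0x4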
*/ struct bxe_devinfo { /* PCIe info */ uint16_t vendor_id; uint16_t device_id; uint16_t subvendor_id; uint16_t subdevice_id; /* * chip_id = 0b'CCCCCCCCCCCCCCCCRRRRMMMMMMMMBBBB' * C = Chip Number (bits 16-31) * R = Chip Revision (bits 12-15) * M = Chip Metal (bits 4-11) * B = Chip Bond ID (bits 0-3) */ uint32_t chip_id; #define CHIP_ID(sc) ((sc)->devinfo.chip_id & 0xffff0000) #define CHIP_NUM(sc) ((sc)->devinfo.chip_id >> 16) /* device ids */ #define CHIP_NUM_57710 0x164e #define CHIP_NUM_57711 0x164f #define CHIP_NUM_57711E 0x1650 #define CHIP_NUM_57712 0x1662 #define CHIP_NUM_57712_MF 0x1663 #define CHIP_NUM_57712_VF 0x166f #define CHIP_NUM_57800 0x168a #define CHIP_NUM_57800_MF 0x16a5 #define CHIP_NUM_57800_VF 0x16a9 #define CHIP_NUM_57810 0x168e #define CHIP_NUM_57810_MF 0x16ae #define CHIP_NUM_57810_VF 0x16af #define CHIP_NUM_57811 0x163d #define CHIP_NUM_57811_MF 0x163e #define CHIP_NUM_57811_VF 0x163f #define CHIP_NUM_57840_OBS 0x168d #define CHIP_NUM_57840_OBS_MF 0x16ab #define CHIP_NUM_57840_4_10 0x16a1 #define CHIP_NUM_57840_2_20 0x16a2 #define CHIP_NUM_57840_MF 0x16a4 #define CHIP_NUM_57840_VF 0x16ad #define CHIP_REV_SHIFT 12 #define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) #define CHIP_REV(sc) ((sc)->devinfo.chip_id & CHIP_REV_MASK) #define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) #define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) #define CHIP_REV_Cx (0x2 << CHIP_REV_SHIFT) #define CHIP_REV_IS_SLOW(sc) \ (CHIP_REV(sc) > 0x00005000) #define CHIP_REV_IS_FPGA(sc) \ (CHIP_REV_IS_SLOW(sc) && (CHIP_REV(sc) & 0x00001000)) #define CHIP_REV_IS_EMUL(sc) \ (CHIP_REV_IS_SLOW(sc) && !(CHIP_REV(sc) & 0x00001000)) #define CHIP_REV_IS_ASIC(sc) \ (!CHIP_REV_IS_SLOW(sc)) #define CHIP_METAL(sc) ((sc->devinfo.chip_id) & 0x00000ff0) #define CHIP_BOND_ID(sc) ((sc->devinfo.chip_id) & 0x0000000f) #define CHIP_IS_E1(sc) (CHIP_NUM(sc) == CHIP_NUM_57710) #define CHIP_IS_57710(sc) (CHIP_NUM(sc) == CHIP_NUM_57710) #define CHIP_IS_57711(sc) (CHIP_NUM(sc) == CHIP_NUM_57711) #define CHIP_IS_57711E(sc) (CHIP_NUM(sc) == CHIP_NUM_57711E) #define CHIP_IS_E1H(sc) ((CHIP_IS_57711(sc)) || \ (CHIP_IS_57711E(sc))) #define CHIP_IS_E1x(sc) (CHIP_IS_E1((sc)) || \ CHIP_IS_E1H((sc))) #define CHIP_IS_57712(sc) (CHIP_NUM(sc) == CHIP_NUM_57712) #define CHIP_IS_57712_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_MF) #define CHIP_IS_57712_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57712_VF) #define CHIP_IS_E2(sc) (CHIP_IS_57712(sc) || \ CHIP_IS_57712_MF(sc)) #define CHIP_IS_57800(sc) (CHIP_NUM(sc) == CHIP_NUM_57800) #define CHIP_IS_57800_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_MF) #define CHIP_IS_57800_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57800_VF) #define CHIP_IS_57810(sc) (CHIP_NUM(sc) == CHIP_NUM_57810) #define CHIP_IS_57810_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_MF) #define CHIP_IS_57810_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57810_VF) #define CHIP_IS_57811(sc) (CHIP_NUM(sc) == CHIP_NUM_57811) #define CHIP_IS_57811_MF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_MF) #define CHIP_IS_57811_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57811_VF) #define CHIP_IS_57840(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS) || \ (CHIP_NUM(sc) == CHIP_NUM_57840_4_10) || \ (CHIP_NUM(sc) == CHIP_NUM_57840_2_20)) #define CHIP_IS_57840_MF(sc) ((CHIP_NUM(sc) == CHIP_NUM_57840_OBS_MF) || \ (CHIP_NUM(sc) == CHIP_NUM_57840_MF)) #define CHIP_IS_57840_VF(sc) (CHIP_NUM(sc) == CHIP_NUM_57840_VF) #define CHIP_IS_E3(sc) (CHIP_IS_57800(sc) || \ CHIP_IS_57800_MF(sc) || \ CHIP_IS_57800_VF(sc) || \ CHIP_IS_57810(sc) || \ CHIP_IS_57810_MF(sc) || \ CHIP_IS_57810_VF(sc) || \ CHIP_IS_57811(sc) || \ CHIP_IS_57811_MF(sc) || \ 
                           CHIP_IS_57811_VF(sc) || \
                           CHIP_IS_57840(sc)    || \
                           CHIP_IS_57840_MF(sc) || \
                           CHIP_IS_57840_VF(sc))
#define CHIP_IS_E3A0(sc) (CHIP_IS_E3(sc) && \
                          (CHIP_REV(sc) == CHIP_REV_Ax))
#define CHIP_IS_E3B0(sc) (CHIP_IS_E3(sc) && \
                          (CHIP_REV(sc) == CHIP_REV_Bx))
#define USES_WARPCORE(sc) (CHIP_IS_E3(sc))
#define CHIP_IS_E2E3(sc) (CHIP_IS_E2(sc) || \
                          CHIP_IS_E3(sc))

#define CHIP_IS_MF_CAP(sc) (CHIP_IS_57711E(sc)   || \
                            CHIP_IS_57712_MF(sc) || \
                            CHIP_IS_E3(sc))

#define IS_VF(sc) (CHIP_IS_57712_VF(sc) || \
                   CHIP_IS_57800_VF(sc) || \
                   CHIP_IS_57810_VF(sc) || \
                   CHIP_IS_57840_VF(sc))
#define IS_PF(sc) (!IS_VF(sc))

/*
 * This define is used in two main places:
 * 1. In the early stages of nic_load, to know whether to configure
 *    Parser/Searcher to nic-only mode or to offload mode. Offload mode
 *    is configured if either the chip is E1x (where the NIC_MODE register
 *    is not applicable), or if cnic has already registered for this port
 *    (which means that the user wants storage services).
 * 2. During cnic-related load, to know if offload mode is already
 *    configured in the HW or needs to be configured. Since the transition
 *    from nic-mode to offload-mode in HW causes traffic corruption,
 *    nic-mode is configured only on ports on which storage services were
 *    never requested.
 */
#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))

    uint8_t chip_port_mode;
#define CHIP_4_PORT_MODE    0x0
#define CHIP_2_PORT_MODE    0x1
#define CHIP_PORT_MODE_NONE 0x2
#define CHIP_PORT_MODE(sc)      ((sc)->devinfo.chip_port_mode)
#define CHIP_IS_MODE_4_PORT(sc) (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE)

    uint8_t int_block;
#define INT_BLOCK_HC           0
#define INT_BLOCK_IGU          1
#define INT_BLOCK_MODE_NORMAL  0
#define INT_BLOCK_MODE_BW_COMP 2
#define CHIP_INT_MODE_IS_NBC(sc)                          \
    (!CHIP_IS_E1x(sc) &&                                  \
     !((sc)->devinfo.int_block & INT_BLOCK_MODE_BW_COMP))
#define CHIP_INT_MODE_IS_BC(sc) (!CHIP_INT_MODE_IS_NBC(sc))

    uint32_t shmem_base;
    uint32_t shmem2_base;
    uint32_t bc_ver;
    char bc_ver_str[32];
    uint32_t mf_cfg_base; /* bootcode shmem address in BAR memory */
    struct bxe_mf_info mf_info;

    int flash_size;
#define NVRAM_1MB_SIZE      0x20000
#define NVRAM_TIMEOUT_COUNT 30000
#define NVRAM_PAGE_SIZE     256

    /* PCIe capability information */
    uint32_t pcie_cap_flags;
#define BXE_PM_CAPABLE_FLAG   0x00000001
#define BXE_PCIE_CAPABLE_FLAG 0x00000002
#define BXE_MSI_CAPABLE_FLAG  0x00000004
#define BXE_MSIX_CAPABLE_FLAG 0x00000008
    uint16_t pcie_pm_cap_reg;
    uint16_t pcie_pcie_cap_reg;
    //uint16_t pcie_devctl;
    uint16_t pcie_link_width;
    uint16_t pcie_link_speed;
    uint16_t pcie_msi_cap_reg;
    uint16_t pcie_msix_cap_reg;

    /* device configuration read from bootcode shared memory */
    uint32_t hw_config;
    uint32_t hw_config2;
}; /* struct bxe_devinfo */

struct bxe_sp_objs {
    struct ecore_vlan_mac_obj mac_obj; /* MACs object */
    struct ecore_queue_sp_obj q_obj;   /* Queue State object */
}; /* struct bxe_sp_objs */

/*
 * Data that will be used to create a link report message. We will keep the
 * data used for the last link report in order to prevent reporting the same
 * link parameters twice.
 */
struct bxe_link_report_data {
    uint16_t      line_speed;        /* Effective line speed */
    unsigned long link_report_flags; /* BXE_LINK_REPORT_XXX flags */
};

enum {
    BXE_LINK_REPORT_FULL_DUPLEX,
    BXE_LINK_REPORT_LINK_DOWN,
    BXE_LINK_REPORT_RX_FC_ON,
    BXE_LINK_REPORT_TX_FC_ON
};

/* Top level device private data structure. */
struct bxe_softc {
    /*
     * First entry must be a pointer to the BSD ifnet struct which
     * has a first element of 'void *if_softc' (which is us).
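     * (Illustrative note, an assumption rather than verified behavior:
     * with this layout, if_getsoftc(sc->ifp) would hand back this same
     * struct bxe_softc pointer.)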
XXX */ if_t ifp; struct ifmedia ifmedia; /* network interface media structure */ int media; int state; /* device state */ #define BXE_STATE_CLOSED 0x0000 #define BXE_STATE_OPENING_WAITING_LOAD 0x1000 #define BXE_STATE_OPENING_WAITING_PORT 0x2000 #define BXE_STATE_OPEN 0x3000 #define BXE_STATE_CLOSING_WAITING_HALT 0x4000 #define BXE_STATE_CLOSING_WAITING_DELETE 0x5000 #define BXE_STATE_CLOSING_WAITING_UNLOAD 0x6000 #define BXE_STATE_DISABLED 0xD000 #define BXE_STATE_DIAG 0xE000 #define BXE_STATE_ERROR 0xF000 int flags; #define BXE_ONE_PORT_FLAG 0x00000001 #define BXE_NO_ISCSI 0x00000002 #define BXE_NO_FCOE 0x00000004 #define BXE_ONE_PORT(sc) (sc->flags & BXE_ONE_PORT_FLAG) //#define BXE_NO_WOL_FLAG 0x00000008 //#define BXE_USING_DAC_FLAG 0x00000010 //#define BXE_USING_MSIX_FLAG 0x00000020 //#define BXE_USING_MSI_FLAG 0x00000040 //#define BXE_DISABLE_MSI_FLAG 0x00000080 #define BXE_NO_MCP_FLAG 0x00000200 #define BXE_NOMCP(sc) (sc->flags & BXE_NO_MCP_FLAG) //#define BXE_SAFC_TX_FLAG 0x00000400 #define BXE_MF_FUNC_DIS 0x00000800 #define BXE_TX_SWITCHING 0x00001000 #define BXE_NO_PULSE 0x00002000 unsigned long debug; /* per-instance debug logging config */ #define MAX_BARS 5 struct bxe_bar bar[MAX_BARS]; /* map BARs 0, 2, 4 */ uint16_t doorbell_size; /* periodic timer callout */ #define PERIODIC_STOP 0 #define PERIODIC_GO 1 volatile unsigned long periodic_flags; struct callout periodic_callout; /* chip start/stop/reset taskqueue */ #define CHIP_TQ_NONE 0 #define CHIP_TQ_START 1 #define CHIP_TQ_STOP 2 #define CHIP_TQ_REINIT 3 volatile unsigned long chip_tq_flags; struct task chip_tq_task; struct taskqueue *chip_tq; char chip_tq_name[32]; /* slowpath interrupt taskqueue */ struct task sp_tq_task; struct taskqueue *sp_tq; char sp_tq_name[32]; struct bxe_fastpath fp[MAX_RSS_CHAINS]; struct bxe_sp_objs sp_objs[MAX_RSS_CHAINS]; device_t dev; /* parent device handle */ uint8_t unit; /* driver instance number */ int pcie_bus; /* PCIe bus number */ int pcie_device; /* PCIe device/slot number */ int pcie_func; /* PCIe function number */ uint8_t pfunc_rel; /* function relative */ uint8_t pfunc_abs; /* function absolute */ uint8_t path_id; /* function absolute */ #define SC_PATH(sc) (sc->path_id) #define SC_PORT(sc) (sc->pfunc_rel & 1) #define SC_FUNC(sc) (sc->pfunc_rel) #define SC_ABS_FUNC(sc) (sc->pfunc_abs) #define SC_VN(sc) (sc->pfunc_rel >> 1) #define SC_L_ID(sc) (SC_VN(sc) << 2) #define PORT_ID(sc) SC_PORT(sc) #define PATH_ID(sc) SC_PATH(sc) #define VNIC_ID(sc) SC_VN(sc) #define FUNC_ID(sc) SC_FUNC(sc) #define ABS_FUNC_ID(sc) SC_ABS_FUNC(sc) #define SC_FW_MB_IDX_VN(sc, vn) \ (SC_PORT(sc) + (vn) * \ ((CHIP_IS_E1x(sc) || (CHIP_IS_MODE_4_PORT(sc))) ? 
2 : 1)) #define SC_FW_MB_IDX(sc) SC_FW_MB_IDX_VN(sc, SC_VN(sc)) int if_capen; /* enabled interface capabilities */ struct bxe_devinfo devinfo; char fw_ver_str[32]; char mf_mode_str[32]; char pci_link_str[32]; const struct iro *iro_array; #ifdef BXE_CORE_LOCK_SX struct sx core_sx; char core_sx_name[32]; #else struct mtx core_mtx; char core_mtx_name[32]; #endif struct mtx sp_mtx; char sp_mtx_name[32]; struct mtx dmae_mtx; char dmae_mtx_name[32]; struct mtx fwmb_mtx; char fwmb_mtx_name[32]; struct mtx print_mtx; char print_mtx_name[32]; struct mtx stats_mtx; char stats_mtx_name[32]; struct mtx mcast_mtx; char mcast_mtx_name[32]; #ifdef BXE_CORE_LOCK_SX #define BXE_CORE_TRYLOCK(sc) sx_try_xlock(&sc->core_sx) #define BXE_CORE_LOCK(sc) sx_xlock(&sc->core_sx) #define BXE_CORE_UNLOCK(sc) sx_xunlock(&sc->core_sx) #define BXE_CORE_LOCK_ASSERT(sc) sx_assert(&sc->core_sx, SA_XLOCKED) #else #define BXE_CORE_TRYLOCK(sc) mtx_trylock(&sc->core_mtx) #define BXE_CORE_LOCK(sc) mtx_lock(&sc->core_mtx) #define BXE_CORE_UNLOCK(sc) mtx_unlock(&sc->core_mtx) #define BXE_CORE_LOCK_ASSERT(sc) mtx_assert(&sc->core_mtx, MA_OWNED) #endif #define BXE_SP_LOCK(sc) mtx_lock(&sc->sp_mtx) #define BXE_SP_UNLOCK(sc) mtx_unlock(&sc->sp_mtx) #define BXE_SP_LOCK_ASSERT(sc) mtx_assert(&sc->sp_mtx, MA_OWNED) #define BXE_DMAE_LOCK(sc) mtx_lock(&sc->dmae_mtx) #define BXE_DMAE_UNLOCK(sc) mtx_unlock(&sc->dmae_mtx) #define BXE_DMAE_LOCK_ASSERT(sc) mtx_assert(&sc->dmae_mtx, MA_OWNED) #define BXE_FWMB_LOCK(sc) mtx_lock(&sc->fwmb_mtx) #define BXE_FWMB_UNLOCK(sc) mtx_unlock(&sc->fwmb_mtx) #define BXE_FWMB_LOCK_ASSERT(sc) mtx_assert(&sc->fwmb_mtx, MA_OWNED) #define BXE_PRINT_LOCK(sc) mtx_lock(&sc->print_mtx) #define BXE_PRINT_UNLOCK(sc) mtx_unlock(&sc->print_mtx) #define BXE_PRINT_LOCK_ASSERT(sc) mtx_assert(&sc->print_mtx, MA_OWNED) #define BXE_STATS_LOCK(sc) mtx_lock(&sc->stats_mtx) #define BXE_STATS_UNLOCK(sc) mtx_unlock(&sc->stats_mtx) #define BXE_STATS_LOCK_ASSERT(sc) mtx_assert(&sc->stats_mtx, MA_OWNED) #if __FreeBSD_version < 800000 #define BXE_MCAST_LOCK(sc) \ do { \ mtx_lock(&sc->mcast_mtx); \ IF_ADDR_LOCK(sc->ifp); \ } while (0) #define BXE_MCAST_UNLOCK(sc) \ do { \ IF_ADDR_UNLOCK(sc->ifp); \ mtx_unlock(&sc->mcast_mtx); \ } while (0) #else #define BXE_MCAST_LOCK(sc) \ do { \ mtx_lock(&sc->mcast_mtx); \ if_maddr_rlock(sc->ifp); \ } while (0) #define BXE_MCAST_UNLOCK(sc) \ do { \ if_maddr_runlock(sc->ifp); \ mtx_unlock(&sc->mcast_mtx); \ } while (0) #endif #define BXE_MCAST_LOCK_ASSERT(sc) mtx_assert(&sc->mcast_mtx, MA_OWNED) int dmae_ready; #define DMAE_READY(sc) (sc->dmae_ready) struct ecore_credit_pool_obj vlans_pool; struct ecore_credit_pool_obj macs_pool; struct ecore_rx_mode_obj rx_mode_obj; struct ecore_mcast_obj mcast_obj; struct ecore_rss_config_obj rss_conf_obj; struct ecore_func_sp_obj func_obj; uint16_t fw_seq; uint16_t fw_drv_pulse_wr_seq; uint32_t func_stx; struct elink_params link_params; struct elink_vars link_vars; uint32_t link_cnt; struct bxe_link_report_data last_reported_link; char mac_addr_str[32]; int last_reported_link_state; int tx_ring_size; int rx_ring_size; int wol; int is_leader; int recovery_state; #define BXE_RECOVERY_DONE 1 #define BXE_RECOVERY_INIT 2 #define BXE_RECOVERY_WAIT 3 #define BXE_RECOVERY_FAILED 4 #define BXE_RECOVERY_NIC_LOADING 5 uint32_t rx_mode; #define BXE_RX_MODE_NONE 0 #define BXE_RX_MODE_NORMAL 1 #define BXE_RX_MODE_ALLMULTI 2 #define BXE_RX_MODE_PROMISC 3 #define BXE_MAX_MULTICAST 64 struct bxe_port port; struct cmng_init cmng; /* user configs */ int num_queues; int max_rx_bufs; int 
hc_rx_ticks; int hc_tx_ticks; int rx_budget; int max_aggregation_size; int mrrs; int autogreeen; #define AUTO_GREEN_HW_DEFAULT 0 #define AUTO_GREEN_FORCE_ON 1 #define AUTO_GREEN_FORCE_OFF 2 int interrupt_mode; #define INTR_MODE_INTX 0 #define INTR_MODE_MSI 1 #define INTR_MODE_MSIX 2 int udp_rss; /* interrupt allocations */ struct bxe_intr intr[MAX_RSS_CHAINS+1]; int intr_count; uint8_t igu_dsb_id; uint8_t igu_base_sb; uint8_t igu_sb_cnt; //uint8_t min_msix_vec_cnt; uint32_t igu_base_addr; //bus_addr_t def_status_blk_mapping; uint8_t base_fw_ndsb; #define DEF_SB_IGU_ID 16 #define DEF_SB_ID HC_SP_SB_ID /* parent bus DMA tag */ bus_dma_tag_t parent_dma_tag; /* default status block */ struct bxe_dma def_sb_dma; struct host_sp_status_block *def_sb; uint16_t def_idx; uint16_t def_att_idx; uint32_t attn_state; struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; /* general SP events - stats query, cfc delete, etc */ #define HC_SP_INDEX_ETH_DEF_CONS 3 /* EQ completions */ #define HC_SP_INDEX_EQ_CONS 7 /* FCoE L2 connection completions */ #define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 #define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 /* iSCSI L2 */ #define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 #define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 /* event queue */ struct bxe_dma eq_dma; union event_ring_elem *eq; uint16_t eq_prod; uint16_t eq_cons; uint16_t *eq_cons_sb; #define NUM_EQ_PAGES 1 /* must be a power of 2 */ #define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) #define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) #define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) #define EQ_DESC_MASK (NUM_EQ_DESC - 1) #define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) /* depends on EQ_DESC_CNT_PAGE being a power of 2 */ #define NEXT_EQ_IDX(x) \ ((((x) & EQ_DESC_MAX_PAGE) == (EQ_DESC_MAX_PAGE - 1)) ? 
\ ((x) + 2) : ((x) + 1)) /* depends on the above and on NUM_EQ_PAGES being a power of 2 */ #define EQ_DESC(x) ((x) & EQ_DESC_MASK) /* slow path */ struct bxe_dma sp_dma; struct bxe_slowpath *sp; unsigned long sp_state; /* slow path queue */ struct bxe_dma spq_dma; struct eth_spe *spq; #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) #define MAX_SPQ_PENDING 8 uint16_t spq_prod_idx; struct eth_spe *spq_prod_bd; struct eth_spe *spq_last_bd; uint16_t *dsb_sp_prod; //uint16_t *spq_hw_con; //uint16_t spq_left; volatile unsigned long eq_spq_left; /* COMMON_xxx ramrod credit */ volatile unsigned long cq_spq_left; /* ETH_xxx ramrod credit */ /* fw decompression buffer */ struct bxe_dma gz_buf_dma; void *gz_buf; z_streamp gz_strm; uint32_t gz_outlen; #define GUNZIP_BUF(sc) (sc->gz_buf) #define GUNZIP_OUTLEN(sc) (sc->gz_outlen) #define GUNZIP_PHYS(sc) (sc->gz_buf_dma.paddr) #define FW_BUF_SIZE 0x40000 const struct raw_op *init_ops; const uint16_t *init_ops_offsets; /* init block offsets inside init_ops */ const uint32_t *init_data; /* data blob, 32 bit granularity */ uint32_t init_mode_flags; #define INIT_MODE_FLAGS(sc) (sc->init_mode_flags) /* PRAM blobs - raw data */ const uint8_t *tsem_int_table_data; const uint8_t *tsem_pram_data; const uint8_t *usem_int_table_data; const uint8_t *usem_pram_data; const uint8_t *xsem_int_table_data; const uint8_t *xsem_pram_data; const uint8_t *csem_int_table_data; const uint8_t *csem_pram_data; #define INIT_OPS(sc) (sc->init_ops) #define INIT_OPS_OFFSETS(sc) (sc->init_ops_offsets) #define INIT_DATA(sc) (sc->init_data) #define INIT_TSEM_INT_TABLE_DATA(sc) (sc->tsem_int_table_data) #define INIT_TSEM_PRAM_DATA(sc) (sc->tsem_pram_data) #define INIT_USEM_INT_TABLE_DATA(sc) (sc->usem_int_table_data) #define INIT_USEM_PRAM_DATA(sc) (sc->usem_pram_data) #define INIT_XSEM_INT_TABLE_DATA(sc) (sc->xsem_int_table_data) #define INIT_XSEM_PRAM_DATA(sc) (sc->xsem_pram_data) #define INIT_CSEM_INT_TABLE_DATA(sc) (sc->csem_int_table_data) #define INIT_CSEM_PRAM_DATA(sc) (sc->csem_pram_data) /* ILT * For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB * context size we need 8 ILT entries. */ #define ILT_MAX_L2_LINES 8 struct hw_context context[ILT_MAX_L2_LINES]; struct ecore_ilt *ilt; #define ILT_MAX_LINES 256 /* max supported number of RSS queues: IGU SBs minus one for CNIC */ #define BXE_MAX_RSS_COUNT(sc) ((sc)->igu_sb_cnt - CNIC_SUPPORT(sc)) /* max CID count: Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI */ #if 1 #define BXE_L2_MAX_CID(sc) \ (BXE_MAX_RSS_COUNT(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc)) #else #define BXE_L2_MAX_CID(sc) /* OOO + FWD */ \ (BXE_MAX_RSS_COUNT(sc) * ECORE_MULTI_TX_COS + 4 * CNIC_SUPPORT(sc)) #endif #if 1 #define BXE_L2_CID_COUNT(sc) \ (BXE_NUM_ETH_QUEUES(sc) * ECORE_MULTI_TX_COS + 2 * CNIC_SUPPORT(sc)) #else #define BXE_L2_CID_COUNT(sc) /* OOO + FWD */ \ (BXE_NUM_ETH_QUEUES(sc) * ECORE_MULTI_TX_COS + 4 * CNIC_SUPPORT(sc)) #endif #define L2_ILT_LINES(sc) \ (DIV_ROUND_UP(BXE_L2_CID_COUNT(sc), ILT_PAGE_CIDS)) int qm_cid_count; uint8_t dropless_fc; /* total number of FW statistics requests */ uint8_t fw_stats_num; /* * This is a memory buffer that will contain both statistics ramrod * request and data. */ struct bxe_dma fw_stats_dma; /* * FW statistics request shortcut (points at the beginning of fw_stats * buffer). 
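     *
     * Illustrative layout of that single DMA buffer (a sketch inferred
     * from the two shortcuts, not a normative map):
     *
     *     fw_stats_dma.vaddr + 0                 --> struct bxe_fw_stats_req
     *     fw_stats_dma.vaddr + fw_stats_req_size --> struct bxe_fw_stats_data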
*/ int fw_stats_req_size; struct bxe_fw_stats_req *fw_stats_req; bus_addr_t fw_stats_req_mapping; /* * FW statistics data shortcut (points at the beginning of fw_stats * buffer + fw_stats_req_size). */ int fw_stats_data_size; struct bxe_fw_stats_data *fw_stats_data; bus_addr_t fw_stats_data_mapping; /* tracking a pending STAT_QUERY ramrod */ uint16_t stats_pending; /* number of completed statistics ramrods */ uint16_t stats_comp; uint16_t stats_counter; uint8_t stats_init; int stats_state; struct bxe_eth_stats eth_stats; struct host_func_stats func_stats; struct bxe_eth_stats_old eth_stats_old; struct bxe_net_stats_old net_stats_old; struct bxe_fw_port_stats_old fw_stats_old; struct dmae_cmd stats_dmae; /* used by dmae command loader */ int executer_idx; int mtu; /* LLDP params */ struct bxe_config_lldp_params lldp_config_params; /* DCB support on/off */ int dcb_state; #define BXE_DCB_STATE_OFF 0 #define BXE_DCB_STATE_ON 1 /* DCBX engine mode */ int dcbx_enabled; #define BXE_DCBX_ENABLED_OFF 0 #define BXE_DCBX_ENABLED_ON_NEG_OFF 1 #define BXE_DCBX_ENABLED_ON_NEG_ON 2 #define BXE_DCBX_ENABLED_INVALID -1 uint8_t dcbx_mode_uset; struct bxe_config_dcbx_params dcbx_config_params; struct bxe_dcbx_port_params dcbx_port_params; int dcb_version; uint8_t cnic_support; uint8_t cnic_enabled; uint8_t cnic_loaded; #define CNIC_SUPPORT(sc) 0 /* ((sc)->cnic_support) */ #define CNIC_ENABLED(sc) 0 /* ((sc)->cnic_enabled) */ #define CNIC_LOADED(sc) 0 /* ((sc)->cnic_loaded) */ /* multiple tx classes of service */ uint8_t max_cos; #define BXE_MAX_PRIORITY 8 /* priority to cos mapping */ uint8_t prio_to_cos[BXE_MAX_PRIORITY]; int panic; struct cdev *ioctl_dev; - void *grc_dump; - unsigned int trigger_grcdump; - unsigned int grcdump_done; - unsigned int grcdump_started; - + int grcdump_done; void *eeprom; }; /* struct bxe_softc */ /* IOCTL sub-commands for edebug and firmware upgrade */ #define BXE_IOC_RD_NVRAM 1 #define BXE_IOC_WR_NVRAM 2 #define BXE_IOC_STATS_SHOW_NUM 3 #define BXE_IOC_STATS_SHOW_STR 4 #define BXE_IOC_STATS_SHOW_CNT 5 struct bxe_nvram_data { uint32_t op; /* ioctl sub-command */ uint32_t offset; uint32_t len; uint32_t value[1]; /* variable */ }; union bxe_stats_show_data { uint32_t op; /* ioctl sub-command */ struct { uint32_t num; /* return number of stats */ uint32_t len; /* length of each string item */ } desc; /* variable length... */ char str[1]; /* holds names of desc.num stats, each desc.len in length */ /* variable length... 
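 *
 * Sketch of the implied usage sequence (an inference from the
 * BXE_IOC_STATS_SHOW_* sub-commands above, not a verified contract):
 *     1. op = BXE_IOC_STATS_SHOW_NUM -> kernel fills desc.num/desc.len
 *     2. op = BXE_IOC_STATS_SHOW_STR -> kernel fills str[] with
 *        desc.num names, each desc.len bytes long
 *     3. op = BXE_IOC_STATS_SHOW_CNT -> kernel fills stats[] with the
 *        desc.num counter values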
*/ uint64_t stats[1]; /* holds all stats */ }; /* function init flags */ #define FUNC_FLG_RSS 0x0001 #define FUNC_FLG_STATS 0x0002 /* FUNC_FLG_UNMATCHED 0x0004 */ #define FUNC_FLG_TPA 0x0008 #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ struct bxe_func_init_params { bus_addr_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */ bus_addr_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */ uint16_t func_flgs; uint16_t func_id; /* abs function id */ uint16_t pf_id; uint16_t spq_prod; /* valid if FUNC_FLG_SPQ */ }; /* memory resources reside at BARs 0, 2, 4 */ /* Run `pciconf -lb` to see mappings */ #define BAR0 0 #define BAR1 2 #define BAR2 4 #ifdef BXE_REG_NO_INLINE uint8_t bxe_reg_read8(struct bxe_softc *sc, bus_size_t offset); uint16_t bxe_reg_read16(struct bxe_softc *sc, bus_size_t offset); uint32_t bxe_reg_read32(struct bxe_softc *sc, bus_size_t offset); void bxe_reg_write8(struct bxe_softc *sc, bus_size_t offset, uint8_t val); void bxe_reg_write16(struct bxe_softc *sc, bus_size_t offset, uint16_t val); void bxe_reg_write32(struct bxe_softc *sc, bus_size_t offset, uint32_t val); #define REG_RD8(sc, offset) bxe_reg_read8(sc, offset) #define REG_RD16(sc, offset) bxe_reg_read16(sc, offset) #define REG_RD32(sc, offset) bxe_reg_read32(sc, offset) #define REG_WR8(sc, offset, val) bxe_reg_write8(sc, offset, val) #define REG_WR16(sc, offset, val) bxe_reg_write16(sc, offset, val) #define REG_WR32(sc, offset, val) bxe_reg_write32(sc, offset, val) #else /* not BXE_REG_NO_INLINE */ #define REG_WR8(sc, offset, val) \ bus_space_write_1(sc->bar[BAR0].tag, \ sc->bar[BAR0].handle, \ offset, val) #define REG_WR16(sc, offset, val) \ bus_space_write_2(sc->bar[BAR0].tag, \ sc->bar[BAR0].handle, \ offset, val) #define REG_WR32(sc, offset, val) \ bus_space_write_4(sc->bar[BAR0].tag, \ sc->bar[BAR0].handle, \ offset, val) #define REG_RD8(sc, offset) \ bus_space_read_1(sc->bar[BAR0].tag, \ sc->bar[BAR0].handle, \ offset) #define REG_RD16(sc, offset) \ bus_space_read_2(sc->bar[BAR0].tag, \ sc->bar[BAR0].handle, \ offset) #define REG_RD32(sc, offset) \ bus_space_read_4(sc->bar[BAR0].tag, \ sc->bar[BAR0].handle, \ offset) #endif /* BXE_REG_NO_INLINE */ #define REG_RD(sc, offset) REG_RD32(sc, offset) #define REG_WR(sc, offset, val) REG_WR32(sc, offset, val) #define REG_RD_IND(sc, offset) bxe_reg_rd_ind(sc, offset) #define REG_WR_IND(sc, offset, val) bxe_reg_wr_ind(sc, offset, val) #define BXE_SP(sc, var) (&(sc)->sp->var) #define BXE_SP_MAPPING(sc, var) \ (sc->sp_dma.paddr + offsetof(struct bxe_slowpath, var)) #define BXE_FP(sc, nr, var) ((sc)->fp[(nr)].var) #define BXE_SP_OBJ(sc, fp) ((sc)->sp_objs[(fp)->index]) #define REG_RD_DMAE(sc, offset, valp, len32) \ do { \ bxe_read_dmae(sc, offset, len32); \ memcpy(valp, BXE_SP(sc, wb_data[0]), (len32) * 4); \ } while (0) #define REG_WR_DMAE(sc, offset, valp, len32) \ do { \ memcpy(BXE_SP(sc, wb_data[0]), valp, (len32) * 4); \ bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), offset, len32); \ } while (0) #define REG_WR_DMAE_LEN(sc, offset, valp, len32) \ REG_WR_DMAE(sc, offset, valp, len32) #define REG_RD_DMAE_LEN(sc, offset, valp, len32) \ REG_RD_DMAE(sc, offset, valp, len32) #define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \ do { \ /* if (le32_swap) { */ \ /* BLOGW(sc, "VIRT_WR_DMAE_LEN with le32_swap=1\n"); */ \ /* } */ \ memcpy(GUNZIP_BUF(sc), data, len32 * 4); \ ecore_write_big_buf_wb(sc, addr, len32); \ } while (0) #define BXE_DB_MIN_SHIFT 3 /* 8 bytes */ #define BXE_DB_SHIFT 7 /* 128 bytes */ #if (BXE_DB_SHIFT < BXE_DB_MIN_SHIFT) #error 
"Minimum DB doorbell stride is 8" #endif #define DPM_TRIGGER_TYPE 0x40 #define DOORBELL(sc, cid, val) \ do { \ bus_space_write_4(sc->bar[BAR1].tag, sc->bar[BAR1].handle, \ ((sc->doorbell_size * (cid)) + DPM_TRIGGER_TYPE), \ (uint32_t)val); \ } while(0) #define SHMEM_ADDR(sc, field) \ (sc->devinfo.shmem_base + offsetof(struct shmem_region, field)) #define SHMEM_RD(sc, field) REG_RD(sc, SHMEM_ADDR(sc, field)) #define SHMEM_RD16(sc, field) REG_RD16(sc, SHMEM_ADDR(sc, field)) #define SHMEM_WR(sc, field, val) REG_WR(sc, SHMEM_ADDR(sc, field), val) #define SHMEM2_ADDR(sc, field) \ (sc->devinfo.shmem2_base + offsetof(struct shmem2_region, field)) #define SHMEM2_HAS(sc, field) \ (sc->devinfo.shmem2_base && (REG_RD(sc, SHMEM2_ADDR(sc, size)) > \ offsetof(struct shmem2_region, field))) #define SHMEM2_RD(sc, field) REG_RD(sc, SHMEM2_ADDR(sc, field)) #define SHMEM2_WR(sc, field, val) REG_WR(sc, SHMEM2_ADDR(sc, field), val) #define MFCFG_ADDR(sc, field) \ (sc->devinfo.mf_cfg_base + offsetof(struct mf_cfg, field)) #define MFCFG_RD(sc, field) REG_RD(sc, MFCFG_ADDR(sc, field)) #define MFCFG_RD16(sc, field) REG_RD16(sc, MFCFG_ADDR(sc, field)) #define MFCFG_WR(sc, field, val) REG_WR(sc, MFCFG_ADDR(sc, field), val) /* DMAE command defines */ #define DMAE_TIMEOUT -1 #define DMAE_PCI_ERROR -2 /* E2 and onward */ #define DMAE_NOT_RDY -3 #define DMAE_PCI_ERR_FLAG 0x80000000 #define DMAE_SRC_PCI 0 #define DMAE_SRC_GRC 1 #define DMAE_DST_NONE 0 #define DMAE_DST_PCI 1 #define DMAE_DST_GRC 2 #define DMAE_COMP_PCI 0 #define DMAE_COMP_GRC 1 #define DMAE_COMP_REGULAR 0 #define DMAE_COM_SET_ERR 1 #define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_CMD_SRC_SHIFT) #define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_CMD_SRC_SHIFT) #define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_CMD_DST_SHIFT) #define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_CMD_DST_SHIFT) #define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_CMD_C_DST_SHIFT) #define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_CMD_C_DST_SHIFT) #define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_CMD_ENDIANITY_SHIFT) #define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_CMD_ENDIANITY_SHIFT) #define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_CMD_ENDIANITY_SHIFT) #define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_CMD_ENDIANITY_SHIFT) #define DMAE_CMD_PORT_0 0 #define DMAE_CMD_PORT_1 DMAE_CMD_PORT #define DMAE_SRC_PF 0 #define DMAE_SRC_VF 1 #define DMAE_DST_PF 0 #define DMAE_DST_VF 1 #define DMAE_C_SRC 0 #define DMAE_C_DST 1 #define DMAE_LEN32_RD_MAX 0x80 #define DMAE_LEN32_WR_MAX(sc) (CHIP_IS_E1(sc) ? 
0x400 : 0x2000) #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and beyond, upper bit indicates error */ #define MAX_DMAE_C_PER_PORT 8 #define INIT_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + SC_VN(sc)) #define PMF_DMAE_C(sc) ((SC_PORT(sc) * MAX_DMAE_C_PER_PORT) + E1HVN_MAX) static const uint32_t dmae_reg_go_c[] = { DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 }; #define ATTN_NIG_FOR_FUNC (1L << 8) #define ATTN_SW_TIMER_4_FUNC (1L << 9) #define GPIO_2_FUNC (1L << 10) #define GPIO_3_FUNC (1L << 11) #define GPIO_4_FUNC (1L << 12) #define ATTN_GENERAL_ATTN_1 (1L << 13) #define ATTN_GENERAL_ATTN_2 (1L << 14) #define ATTN_GENERAL_ATTN_3 (1L << 15) #define ATTN_GENERAL_ATTN_4 (1L << 13) #define ATTN_GENERAL_ATTN_5 (1L << 14) #define ATTN_GENERAL_ATTN_6 (1L << 15) #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR #define MAX_IGU_ATTN_ACK_TO 100 #define STORM_ASSERT_ARRAY_SIZE 50 #define BXE_PMF_LINK_ASSERT(sc) \ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + SC_FUNC(sc)) #define BXE_MC_ASSERT_BITS \ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) #define BXE_MCP_ASSERT \ GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) #define BXE_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) #define BXE_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) #define MULTI_MASK 0x7f #define PFS_PER_PORT(sc) \ ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4) #define SC_MAX_VN_NUM(sc) PFS_PER_PORT(sc) #define FIRST_ABS_FUNC_IN_PORT(sc) \ ((CHIP_PORT_MODE(sc) == CHIP_PORT_MODE_NONE) ? \ PORT_ID(sc) : (PATH_ID(sc) + (2 * PORT_ID(sc)))) #define FOREACH_ABS_FUNC_IN_PORT(sc, i) \ for ((i) = FIRST_ABS_FUNC_IN_PORT(sc); \ (i) < MAX_FUNC_NUM; \ (i) += (MAX_FUNC_NUM / PFS_PER_PORT(sc))) #define BXE_SWCID_SHIFT 17 #define BXE_SWCID_MASK ((0x1 << BXE_SWCID_SHIFT) - 1) #define SW_CID(x) (le32toh(x) & BXE_SWCID_MASK) #define CQE_CMD(x) (le32toh(x) >> COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) #define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) #define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) #define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) #define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) /* must be used on a CID before placing it on a HW ring */ #define HW_CID(sc, x) \ ((SC_PORT(sc) << 23) | (SC_VN(sc) << BXE_SWCID_SHIFT) | (x)) #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_2500 2500 #define SPEED_10000 10000 #define PCI_PM_D0 1 #define PCI_PM_D3hot 2 #ifndef DUPLEX_UNKNOWN #define DUPLEX_UNKNOWN (0xff) #endif #ifndef SPEED_UNKNOWN #define SPEED_UNKNOWN (-1) #endif /* Enable or disable autonegotiation. 
 */
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE  0x01

/* Which connector port. */
#define PORT_TP    0x00
#define PORT_AUI   0x01
#define PORT_MII   0x02
#define PORT_FIBRE 0x03
#define PORT_BNC   0x04
#define PORT_DA    0x05
#define PORT_NONE  0xef
#define PORT_OTHER 0xff

int  bxe_test_bit(int nr, volatile unsigned long * addr);
void bxe_set_bit(unsigned int nr, volatile unsigned long * addr);
void bxe_clear_bit(int nr, volatile unsigned long * addr);
int  bxe_test_and_set_bit(int nr, volatile unsigned long * addr);
int  bxe_test_and_clear_bit(int nr, volatile unsigned long * addr);
int  bxe_cmpxchg(volatile int *addr, int old, int new);

void bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val);
uint32_t bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t addr);

int bxe_dma_alloc(struct bxe_softc *sc, bus_size_t size,
                  struct bxe_dma *dma, const char *msg);
void bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma);

uint32_t bxe_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type);
uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode);
uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type,
                         uint8_t dst_type, uint8_t with_comp,
                         uint8_t comp_type);
void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx);
void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32);
void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr,
                    uint32_t dst_addr, uint32_t len32);
void bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr,
                             uint32_t addr, uint32_t len);
void bxe_set_ctx_validation(struct bxe_softc *sc, struct eth_context *cxt,
                            uint32_t cid);
void bxe_update_coalesce_sb_index(struct bxe_softc *sc, uint8_t fw_sb_id,
                                  uint8_t sb_index, uint8_t disable,
                                  uint16_t usec);
int bxe_sp_post(struct bxe_softc *sc, int command, int cid,
                uint32_t data_hi, uint32_t data_lo, int cmd_type);
void bxe_igu_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id,
                    uint8_t segment, uint16_t index, uint8_t op,
                    uint8_t update);

void ecore_init_e1_firmware(struct bxe_softc *sc);
void ecore_init_e1h_firmware(struct bxe_softc *sc);
void ecore_init_e2_firmware(struct bxe_softc *sc);

void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr,
                               size_t size, uint32_t *data);

/*********************/
/* LOGGING AND DEBUG */
/*********************/

/* debug logging codepaths */
#define DBG_LOAD   0x00000001 /* load and unload    */
#define DBG_INTR   0x00000002 /* interrupt handling */
#define DBG_SP     0x00000004 /* slowpath handling  */
#define DBG_STATS  0x00000008 /* stats updates      */
#define DBG_TX     0x00000010 /* packet transmit    */
#define DBG_RX     0x00000020 /* packet receive     */
#define DBG_PHY    0x00000040 /* phy/link handling  */
#define DBG_IOCTL  0x00000080 /* ioctl handling     */
#define DBG_MBUF   0x00000100 /* dumping mbuf info  */
#define DBG_REGS   0x00000200 /* register access    */
#define DBG_LRO    0x00000400 /* lro processing     */
#define DBG_ASSERT 0x80000000 /* debug assert       */
#define DBG_ALL    0xFFFFFFFF /* flying monkeys     */

#define DBASSERT(sc, exp, msg)                         \
    do {                                               \
        if (__predict_false(sc->debug & DBG_ASSERT)) { \
            if (__predict_false(!(exp))) {             \
                panic msg;                             \
            }                                          \
        }                                              \
    } while (0)

/* log a debug message */
#define BLOGD(sc, codepath, format, args...)           \
    do {                                               \
        if (__predict_false(sc->debug & (codepath))) { \
            device_printf((sc)->dev,                   \
                          "%s(%s:%d) " format,         \
                          __FUNCTION__,                \
                          __FILE__,                    \
                          __LINE__,                    \
                          ## args);                    \
        }                                              \
    } while(0)

/* log an info message */
#define BLOGI(sc, format, args...)             \
    do {                                       \
        if (__predict_false(sc->debug)) {      \
            device_printf((sc)->dev,           \
                          "%s(%s:%d) " format, \
                          __FUNCTION__,        \
                          __FILE__,            \
                          __LINE__,            \
                          ## args);            \
        } else {                               \
            device_printf((sc)->dev,           \
                          format,              \
                          ## args);            \
        }                                      \
    } while(0)

/* log a warning message */
#define BLOGW(sc, format, args...)                      \
    do {                                                \
        if (__predict_false(sc->debug)) {               \
            device_printf((sc)->dev,                    \
                          "%s(%s:%d) WARNING: " format, \
                          __FUNCTION__,                 \
                          __FILE__,                     \
                          __LINE__,                     \
                          ## args);                     \
        } else {                                        \
            device_printf((sc)->dev,                    \
                          "WARNING: " format,           \
                          ## args);                     \
        }                                               \
    } while(0)

/* log an error message */
#define BLOGE(sc, format, args...)                    \
    do {                                              \
        if (__predict_false(sc->debug)) {             \
            device_printf((sc)->dev,                  \
                          "%s(%s:%d) ERROR: " format, \
                          __FUNCTION__,               \
                          __FILE__,                   \
                          __LINE__,                   \
                          ## args);                   \
        } else {                                      \
            device_printf((sc)->dev,                  \
                          "ERROR: " format,           \
                          ## args);                   \
        }                                             \
    } while(0)

#ifdef ECORE_STOP_ON_ERROR

#define bxe_panic(sc, msg) \
    do {                   \
        panic msg;         \
    } while (0)

#else

#define bxe_panic(sc, msg) \
    device_printf((sc)->dev, "%s (%s,%d)\n", __FUNCTION__, __FILE__, __LINE__);

#endif

#define CATC_TRIGGER(sc, data) REG_WR((sc), 0x2000, (data));
#define CATC_TRIGGER_START(sc) CATC_TRIGGER((sc), 0xcafecafe)

void bxe_dump_mem(struct bxe_softc *sc, char *tag,
                  uint8_t *mem, uint32_t len);
void bxe_dump_mbuf_data(struct bxe_softc *sc, char *pTag,
                        struct mbuf *m, uint8_t contents);

-extern int bxe_grc_dump(struct bxe_softc *sc);
+

#if __FreeBSD_version >= 800000
#if __FreeBSD_version >= 1000000
#define BXE_SET_FLOWID(m)   M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE)
#define BXE_VALID_FLOWID(m) (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
#else
#define BXE_VALID_FLOWID(m) ((m->m_flags & M_FLOWID) != 0)
#define BXE_SET_FLOWID(m)   m->m_flags |= M_FLOWID
#endif
#endif /* #if __FreeBSD_version >= 800000 */

/***********/
/* INLINES */
/***********/

static inline uint32_t
reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected,
         int ms, int wait)
{
    uint32_t val;

    do {
        val = REG_RD(sc, reg);
        if (val == expected) {
            break;
        }
        ms -= wait;
        DELAY(wait * 1000);
    } while (ms > 0);

    return (val);
}

static inline void
bxe_update_fp_sb_idx(struct bxe_fastpath *fp)
{
    mb(); /* status block is written to by the chip */
    fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void
bxe_igu_ack_sb_gen(struct bxe_softc *sc, uint8_t igu_sb_id,
                   uint8_t segment, uint16_t index, uint8_t op,
                   uint8_t update, uint32_t igu_addr)
{
    struct igu_regular cmd_data = {0};

    cmd_data.sb_id_and_flags =
        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
         (update << IGU_REGULAR_BUPDATE_SHIFT) |
         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

    BLOGD(sc, DBG_INTR, "write 0x%08x to IGU addr 0x%x\n",
          cmd_data.sb_id_and_flags, igu_addr);

    REG_WR(sc, igu_addr, cmd_data.sb_id_and_flags);

    /* Make sure that ACK is written */
    bus_space_barrier(sc->bar[0].tag, sc->bar[0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    mb();
}

static inline void
bxe_hc_ack_sb(struct bxe_softc *sc, uint8_t sb_id, uint8_t storm,
              uint16_t index, uint8_t op, uint8_t update)
{
    uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc)*32 +
                        COMMAND_REG_INT_ACK);
    struct igu_ack_register igu_ack;

    igu_ack.status_block_index = index;
    igu_ack.sb_id_and_flags =
        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

    REG_WR(sc, hc_addr, (*(uint32_t *)&igu_ack));

    /* Make sure that ACK is written */
    bus_space_barrier(sc->bar[0].tag, sc->bar[0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    mb();
}

static inline void
bxe_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id, uint8_t storm,
           uint16_t index, uint8_t op, uint8_t update)
{
    if (sc->devinfo.int_block == INT_BLOCK_HC)
        bxe_hc_ack_sb(sc, igu_sb_id, storm, index, op, update);
    else {
        uint8_t segment;
        if (CHIP_INT_MODE_IS_BC(sc)) {
            segment = storm;
        } else if (igu_sb_id != sc->igu_dsb_id) {
            segment = IGU_SEG_ACCESS_DEF;
        } else if (storm == ATTENTION_ID) {
            segment = IGU_SEG_ACCESS_ATTN;
        } else {
            segment = IGU_SEG_ACCESS_DEF;
        }
        bxe_igu_ack_sb(sc, igu_sb_id, segment, index, op, update);
    }
}

static inline uint16_t
bxe_hc_ack_int(struct bxe_softc *sc)
{
    uint32_t hc_addr = (HC_REG_COMMAND_REG + SC_PORT(sc)*32 +
                        COMMAND_REG_SIMD_MASK);
    uint32_t result = REG_RD(sc, hc_addr);

    mb();
    return (result);
}

static inline uint16_t
bxe_igu_ack_int(struct bxe_softc *sc)
{
    uint32_t igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
    uint32_t result = REG_RD(sc, igu_addr);

    BLOGD(sc, DBG_INTR, "read 0x%08x from IGU addr 0x%x\n",
          result, igu_addr);

    mb();
    return (result);
}

static inline uint16_t
bxe_ack_int(struct bxe_softc *sc)
{
    mb();
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        return (bxe_hc_ack_int(sc));
    } else {
        return (bxe_igu_ack_int(sc));
    }
}

static inline int
func_by_vn(struct bxe_softc *sc, int vn)
{
    return (2 * vn + SC_PORT(sc));
}

/*
 * Statistics IDs are global per chip/path, while Client IDs for E1x
 * are per port.
 */
static inline uint8_t
bxe_stats_id(struct bxe_fastpath *fp)
{
    struct bxe_softc *sc = fp->sc;

    if (!CHIP_IS_E1x(sc)) {
        return (fp->cl_id);
    }

    return (fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x);
}

#endif /* __BXE_H__ */
Index: projects/release-pkg/sys/dev/bxe/bxe_stats.c
===================================================================
--- projects/release-pkg/sys/dev/bxe/bxe_stats.c	(revision 297926)
+++ projects/release-pkg/sys/dev/bxe/bxe_stats.c	(revision 297927)
@@ -1,1848 +1,1840 @@
/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bxe.h"
#include "bxe_stats.h"

#ifdef __i386__
#define BITS_PER_LONG 32
#else
#define BITS_PER_LONG 64
#endif

static inline long
bxe_hilo(uint32_t *hiref)
{
    uint32_t lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
    uint32_t hi = *hiref;
    return (HILO_U64(hi, lo));
#else
    return (lo);
#endif
}

static inline uint16_t
bxe_get_port_stats_dma_len(struct bxe_softc *sc)
{
    uint16_t res = 0;
    uint32_t size;

    /* 'newest' convention - shmem2 contains the size of the port stats */
    if (SHMEM2_HAS(sc, sizeof_port_stats)) {
        size = SHMEM2_RD(sc, sizeof_port_stats);
        if (size) {
            res = size;
        }

        /* prevent newer BC from causing buffer overflow */
        if (res > sizeof(struct host_port_stats)) {
            res = sizeof(struct host_port_stats);
        }
    }

    /*
     * Older convention - all BCs support the port stats fields up until
     * the 'not_used' field
     */
    if (!res) {
        res = (offsetof(struct host_port_stats, not_used) + 4);

        /* if PFC stats are supported by the MFW, DMA them as well */
        if (sc->devinfo.bc_ver >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) {
            res += (offsetof(struct host_port_stats, pfc_frames_rx_lo) -
                    offsetof(struct host_port_stats, pfc_frames_tx_hi) + 4);
        }
    }

    res >>= 2;

    DBASSERT(sc, !(res > 2 * DMAE_LEN32_RD_MAX), ("big stats dmae length\n"));
    return (res);
}

/*
 * Init service functions
 */

static void
bxe_dp_stats(struct bxe_softc *sc)
{
    int i;

    BLOGD(sc, DBG_STATS,
          "dumping stats:\n"
          "  fw_stats_req\n"
          "    hdr\n"
          "      cmd_num %d\n"
          "      reserved0 %d\n"
          "      drv_stats_counter %d\n"
          "      reserved1 %d\n"
          "      stats_counters_addrs %x %x\n",
          sc->fw_stats_req->hdr.cmd_num,
          sc->fw_stats_req->hdr.reserved0,
          sc->fw_stats_req->hdr.drv_stats_counter,
          sc->fw_stats_req->hdr.reserved1,
          sc->fw_stats_req->hdr.stats_counters_addrs.hi,
          sc->fw_stats_req->hdr.stats_counters_addrs.lo);

    for (i = 0; i < sc->fw_stats_req->hdr.cmd_num; i++) {
        BLOGD(sc, DBG_STATS,
              "query[%d]\n"
              "  kind %d\n"
              "  index %d\n"
              "  funcID %d\n"
              "  reserved %d\n"
              "  address %x %x\n",
              i,
              sc->fw_stats_req->query[i].kind,
              sc->fw_stats_req->query[i].index,
              sc->fw_stats_req->query[i].funcID,
              sc->fw_stats_req->query[i].reserved,
              sc->fw_stats_req->query[i].address.hi,
              sc->fw_stats_req->query[i].address.lo);
    }
}

/*
 * Post the next statistics ramrod. Protect it with the lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in a
 * sc->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
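 *
 * (Descriptive note added for clarity, not in the original: the body
 * below uses a double-checked test of sc->stats_pending -- once
 * lock-free, then again under BXE_STATS_LOCK() -- so that the common
 * "a ramrod is already in flight" case avoids taking the lock.)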
*/ static void bxe_storm_stats_post(struct bxe_softc *sc) { int rc; if (!sc->stats_pending) { BXE_STATS_LOCK(sc); if (sc->stats_pending) { BXE_STATS_UNLOCK(sc); return; } sc->fw_stats_req->hdr.drv_stats_counter = htole16(sc->stats_counter++); BLOGD(sc, DBG_STATS, "sending statistics ramrod %d\n", le16toh(sc->fw_stats_req->hdr.drv_stats_counter)); /* adjust the ramrod to include VF queues statistics */ // XXX bxe_iov_adjust_stats_req(sc); bxe_dp_stats(sc); /* send FW stats ramrod */ rc = bxe_sp_post(sc, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, U64_HI(sc->fw_stats_req_mapping), U64_LO(sc->fw_stats_req_mapping), NONE_CONNECTION_TYPE); if (rc == 0) { sc->stats_pending = 1; } BXE_STATS_UNLOCK(sc); } } static void bxe_hw_stats_post(struct bxe_softc *sc) { struct dmae_cmd *dmae = &sc->stats_dmae; uint32_t *stats_comp = BXE_SP(sc, stats_comp); int loader_idx; uint32_t opcode; *stats_comp = DMAE_COMP_VAL; if (CHIP_REV_IS_SLOW(sc)) { return; } /* Update MCP's statistics if possible */ if (sc->func_stx) { memcpy(BXE_SP(sc, func_stats), &sc->func_stats, sizeof(sc->func_stats)); } /* loader */ if (sc->executer_idx) { loader_idx = PMF_DMAE_C(sc); opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, TRUE, DMAE_COMP_GRC); opcode = bxe_dmae_opcode_clr_src_reset(opcode); memset(dmae, 0, sizeof(struct dmae_cmd)); dmae->opcode = opcode; dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0])); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0])); dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM + sizeof(struct dmae_cmd) * (loader_idx + 1)) >> 2); dmae->dst_addr_hi = 0; dmae->len = sizeof(struct dmae_cmd) >> 2; if (CHIP_IS_E1(sc)) { dmae->len--; } dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx + 1] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; *stats_comp = 0; bxe_post_dmae(sc, dmae, loader_idx); } else if (sc->func_stx) { *stats_comp = 0; bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); } } static int bxe_stats_comp(struct bxe_softc *sc) { uint32_t *stats_comp = BXE_SP(sc, stats_comp); int cnt = 10; while (*stats_comp != DMAE_COMP_VAL) { if (!cnt) { BLOGE(sc, "Timeout waiting for stats finished\n"); - if(sc->trigger_grcdump) { - /* taking grcdump */ - bxe_grc_dump(sc); - } break; } cnt--; DELAY(1000); } return (1); } /* * Statistics service functions */ static void bxe_stats_pmf_update(struct bxe_softc *sc) { struct dmae_cmd *dmae; uint32_t opcode; int loader_idx = PMF_DMAE_C(sc); uint32_t *stats_comp = BXE_SP(sc, stats_comp); if (sc->devinfo.bc_ver <= 0x06001400) { /* * Bootcode v6.0.21 fixed a GRC timeout that occurs when accessing * BRB registers while the BRB block is in reset. The DMA transfer * below triggers this issue resulting in the DMAE to stop * functioning. Skip this initial stats transfer for old bootcode * versions <= 6.0.20. 
*/ return; } /* sanity */ if (!sc->port.pmf || !sc->port.port_stx) { BLOGE(sc, "BUG!\n"); return; } sc->executer_idx = 0; opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, FALSE, 0); dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); dmae->src_addr_lo = (sc->port.port_stx >> 2); dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats)); dmae->len = DMAE_LEN32_RD_MAX; dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); dmae->src_addr_lo = ((sc->port.port_stx >> 2) + DMAE_LEN32_RD_MAX); dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats) + DMAE_LEN32_RD_MAX * 4); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats) + DMAE_LEN32_RD_MAX * 4); dmae->len = (bxe_get_port_stats_dma_len(sc) - DMAE_LEN32_RD_MAX); dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; bxe_hw_stats_post(sc); bxe_stats_comp(sc); } static void bxe_port_stats_init(struct bxe_softc *sc) { struct dmae_cmd *dmae; int port = SC_PORT(sc); uint32_t opcode; int loader_idx = PMF_DMAE_C(sc); uint32_t mac_addr; uint32_t *stats_comp = BXE_SP(sc, stats_comp); /* sanity */ if (!sc->link_vars.link_up || !sc->port.pmf) { BLOGE(sc, "BUG!\n"); return; } sc->executer_idx = 0; /* MCP */ opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, TRUE, DMAE_COMP_GRC); if (sc->port.port_stx) { dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats)); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats)); dmae->dst_addr_lo = sc->port.port_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = bxe_get_port_stats_dma_len(sc); dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } if (sc->func_stx) { dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats)); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats)); dmae->dst_addr_lo = (sc->func_stx >> 2); dmae->dst_addr_hi = 0; dmae->len = (sizeof(struct host_func_stats) >> 2); dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; } /* MAC */ opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, TRUE, DMAE_COMP_GRC); /* EMAC is special */ if (sc->link_vars.mac_type == ELINK_MAC_TYPE_EMAC) { mac_addr = (port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0); /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats)); dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; /* EMAC_REG_EMAC_RX_STAT_AC_28 */ dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2); dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); dmae->len = 1; dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = ((mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2); dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; } else { uint32_t tx_src_addr_lo, rx_src_addr_lo; uint16_t rx_len, tx_len; /* configure the params according to MAC type */ switch (sc->link_vars.mac_type) { case ELINK_MAC_TYPE_BMAC: mac_addr = (port) ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; /* BIGMAC_REGISTER_TX_STAT_GTPKT .. BIGMAC_REGISTER_TX_STAT_GTBYT */ if (CHIP_IS_E1x(sc)) { tx_src_addr_lo = ((mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2); tx_len = ((8 + BIGMAC_REGISTER_TX_STAT_GTBYT - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2); rx_src_addr_lo = ((mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2); rx_len = ((8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - BIGMAC_REGISTER_RX_STAT_GR64) >> 2); } else { tx_src_addr_lo = ((mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2); tx_len = ((8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2); rx_src_addr_lo = ((mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2); rx_len = ((8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - BIGMAC2_REGISTER_RX_STAT_GR64) >> 2); } break; case ELINK_MAC_TYPE_UMAC: /* handled by MSTAT */ case ELINK_MAC_TYPE_XMAC: /* handled by MSTAT */ default: mac_addr = (port) ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; tx_src_addr_lo = ((mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2); rx_src_addr_lo = ((mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2); tx_len = (sizeof(sc->sp->mac_stats.mstat_stats.stats_tx) >> 2); rx_len = (sizeof(sc->sp->mac_stats.mstat_stats.stats_rx) >> 2); break; } /* TX stats */ dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = tx_src_addr_lo; dmae->src_addr_hi = 0; dmae->len = tx_len; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats)); dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; /* RX stats */ dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_hi = 0; dmae->src_addr_lo = rx_src_addr_lo; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, mac_stats) + (tx_len << 2)); dmae->len = rx_len; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } /* NIG */ if (!CHIP_IS_E3(sc)) { dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt0_lo)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt0_lo)); dmae->len = ((2 * sizeof(uint32_t)) >> 2); dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt1_lo)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt1_lo)); dmae->len = ((2 * sizeof(uint32_t)) >> 2); dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; } dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_GRC, DMAE_DST_PCI, TRUE, DMAE_COMP_PCI); dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_BRB_DISCARD : NIG_REG_STAT0_BRB_DISCARD) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, nig_stats)); dmae->dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, nig_stats)); dmae->len = (sizeof(struct nig_stats) - 4*sizeof(uint32_t)) >> 2; dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } static void bxe_func_stats_init(struct bxe_softc *sc) { struct dmae_cmd *dmae = &sc->stats_dmae; uint32_t *stats_comp = BXE_SP(sc, stats_comp); /* sanity */ if (!sc->func_stx) { BLOGE(sc, "BUG!\n"); return; } sc->executer_idx = 0; memset(dmae, 0, sizeof(struct dmae_cmd)); dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, TRUE, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats)); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats)); dmae->dst_addr_lo = (sc->func_stx >> 2); dmae->dst_addr_hi = 0; dmae->len = (sizeof(struct host_func_stats) >> 2); dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } static void bxe_stats_start(struct bxe_softc *sc) { /* * VFs travel through here as part of the statistics FSM, but no action * is required */ if (IS_VF(sc)) { return; } if (sc->port.pmf) { bxe_port_stats_init(sc); } else if (sc->func_stx) { bxe_func_stats_init(sc); } bxe_hw_stats_post(sc); bxe_storm_stats_post(sc); } static void bxe_stats_pmf_start(struct bxe_softc *sc) { bxe_stats_comp(sc); bxe_stats_pmf_update(sc); bxe_stats_start(sc); } static void bxe_stats_restart(struct bxe_softc *sc) { /* * VFs travel through here as part of the statistics FSM, but no action * is required */ if (IS_VF(sc)) { return; } bxe_stats_comp(sc); bxe_stats_start(sc); } static void bxe_bmac_stats_update(struct bxe_softc *sc) { struct host_port_stats *pstats = BXE_SP(sc, port_stats); struct bxe_eth_stats *estats = &sc->eth_stats; struct { uint32_t lo; uint32_t hi; } diff; if (CHIP_IS_E1x(sc)) { struct bmac1_stats *new = BXE_SP(sc, mac_stats.bmac1_stats); /* the macros below will use "bmac1_stats" type */ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); UPDATE_STAT64(tx_stat_gt127, tx_stat_etherstatspkts65octetsto127octets); UPDATE_STAT64(tx_stat_gt255, tx_stat_etherstatspkts128octetsto255octets); UPDATE_STAT64(tx_stat_gt511, tx_stat_etherstatspkts256octetsto511octets); UPDATE_STAT64(tx_stat_gt1023, tx_stat_etherstatspkts512octetsto1023octets); UPDATE_STAT64(tx_stat_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); 
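/*
 * NOTE: UPDATE_STAT64() and the scratch "diff" struct declared at the
 * top of this function work as a pair. A minimal sketch, assuming the
 * bnx2x-style definition this driver inherits (the authoritative
 * version lives in bxe_stats.h):
 *
 *   DIFF_64(diff.hi, new->s_hi, pstats->mac_stx[0].t_hi,
 *           diff.lo, new->s_lo, pstats->mac_stx[0].t_lo);
 *   pstats->mac_stx[0].t_hi = new->s_hi;
 *   pstats->mac_stx[0].t_lo = new->s_lo;
 *   ADD_64(pstats->mac_stx[1].t_hi, diff.hi,
 *          pstats->mac_stx[1].t_lo, diff.lo);
 *
 * i.e. take the delta of the fresh DMAE snapshot against the raw copy
 * in mac_stx[0], remember the new raw value, and accumulate the delta
 * into the monotonic copy in mac_stx[1]. That is why "diff" is
 * declared here even though it is never referenced by name.
 */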
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); } else { struct bmac2_stats *new = BXE_SP(sc, mac_stats.bmac2_stats); struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old; /* the macros below will use "bmac2_stats" type */ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); UPDATE_STAT64(tx_stat_gt127, tx_stat_etherstatspkts65octetsto127octets); UPDATE_STAT64(tx_stat_gt255, tx_stat_etherstatspkts128octetsto255octets); UPDATE_STAT64(tx_stat_gt511, tx_stat_etherstatspkts256octetsto511octets); UPDATE_STAT64(tx_stat_gt1023, tx_stat_etherstatspkts512octetsto1023octets); UPDATE_STAT64(tx_stat_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); /* collect PFC stats */ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; ADD_64(pstats->pfc_frames_tx_hi, fwstats->pfc_frames_tx_hi, pstats->pfc_frames_tx_lo, fwstats->pfc_frames_tx_lo); pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; ADD_64(pstats->pfc_frames_rx_hi, fwstats->pfc_frames_rx_hi, pstats->pfc_frames_rx_lo, fwstats->pfc_frames_rx_lo); } estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; } static void bxe_mstat_stats_update(struct bxe_softc *sc) { struct host_port_stats *pstats = BXE_SP(sc, port_stats); struct bxe_eth_stats *estats = &sc->eth_stats; struct mstat_stats *new = BXE_SP(sc, mac_stats.mstat_stats); ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); /* collect pfc stats */ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); 
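/*
 * NOTE: all of these statistics are carried as {hi,lo} pairs of
 * uint32_t rather than uint64_t. ADD_64() does the split-word
 * addition with an explicit carry; roughly (per bxe_stats.h):
 *
 *   s_lo += a_lo;
 *   s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
 *
 * The unsigned wrap of the low word is detected by the comparison,
 * which propagates the carry into the high word.
 */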
ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets); ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets); ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets); ADD_STAT64(stats_tx.tx_gt1023, tx_stat_etherstatspkts512octetsto1023octets); ADD_STAT64(stats_tx.tx_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors); ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); estats->etherstatspkts1024octetsto1522octets_hi = pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; estats->etherstatspkts1024octetsto1522octets_lo = pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; estats->etherstatspktsover1522octets_hi = pstats->mac_stx[1].tx_stat_mac_2047_hi; estats->etherstatspktsover1522octets_lo = pstats->mac_stx[1].tx_stat_mac_2047_lo; ADD_64(estats->etherstatspktsover1522octets_hi, pstats->mac_stx[1].tx_stat_mac_4095_hi, estats->etherstatspktsover1522octets_lo, pstats->mac_stx[1].tx_stat_mac_4095_lo); ADD_64(estats->etherstatspktsover1522octets_hi, pstats->mac_stx[1].tx_stat_mac_9216_hi, estats->etherstatspktsover1522octets_lo, pstats->mac_stx[1].tx_stat_mac_9216_lo); ADD_64(estats->etherstatspktsover1522octets_hi, pstats->mac_stx[1].tx_stat_mac_16383_hi, estats->etherstatspktsover1522octets_lo, pstats->mac_stx[1].tx_stat_mac_16383_lo); estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; } static void bxe_emac_stats_update(struct bxe_softc *sc) { struct emac_stats *new = BXE_SP(sc, mac_stats.emac_stats); struct host_port_stats *pstats = BXE_SP(sc, port_stats); struct bxe_eth_stats *estats = &sc->eth_stats; UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); UPDATE_EXTEND_STAT(tx_stat_outxonsent); UPDATE_EXTEND_STAT(tx_stat_outxoffsent); UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); 
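/*
 * NOTE: the EMAC block only exposes 32-bit counters, so
 * UPDATE_EXTEND_STAT() widens each one into the {hi,lo} pair kept in
 * pstats->mac_stx[1]. A sketch, assuming the usual definition:
 *
 *   ADD_EXTEND_64(pstats->mac_stx[1].s_hi,
 *                 pstats->mac_stx[1].s_lo, new->s);
 *
 * where ADD_EXTEND_64(hi, lo, a) is simply ADD_64(hi, 0, lo, a): the
 * raw 32-bit value is added to the low word and any wrap carries into
 * the high word.
 */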
UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; ADD_64(estats->pause_frames_received_hi, pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, estats->pause_frames_received_lo, pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxonsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxonsent_lo; ADD_64(estats->pause_frames_sent_hi, pstats->mac_stx[1].tx_stat_outxoffsent_hi, estats->pause_frames_sent_lo, pstats->mac_stx[1].tx_stat_outxoffsent_lo); } static int bxe_hw_stats_update(struct bxe_softc *sc) { struct nig_stats *new = BXE_SP(sc, nig_stats); struct nig_stats *old = &(sc->port.old_nig_stats); struct host_port_stats *pstats = BXE_SP(sc, port_stats); struct bxe_eth_stats *estats = &sc->eth_stats; uint32_t lpi_reg, nig_timer_max; struct { uint32_t lo; uint32_t hi; } diff; switch (sc->link_vars.mac_type) { case ELINK_MAC_TYPE_BMAC: bxe_bmac_stats_update(sc); break; case ELINK_MAC_TYPE_EMAC: bxe_emac_stats_update(sc); break; case ELINK_MAC_TYPE_UMAC: case ELINK_MAC_TYPE_XMAC: bxe_mstat_stats_update(sc); break; case ELINK_MAC_TYPE_NONE: /* unreached */ BLOGD(sc, DBG_STATS, "stats updated by DMAE but no MAC active\n"); return (-1); default: /* unreached */ BLOGE(sc, "stats update failed, unknown MAC type\n"); } ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, new->brb_discard - old->brb_discard); ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, new->brb_truncate - old->brb_truncate); if (!CHIP_IS_E3(sc)) { UPDATE_STAT64_NIG(egress_mac_pkt0, etherstatspkts1024octetsto1522octets); UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); } memcpy(old, new, sizeof(struct nig_stats)); memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), sizeof(struct mac_stx)); estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; pstats->host_port_stats_counter++; if (CHIP_IS_E3(sc)) { lpi_reg = (SC_PORT(sc)) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0; estats->eee_tx_lpi += REG_RD(sc, lpi_reg); } if (!BXE_NOMCP(sc)) { nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer); if (nig_timer_max != estats->nig_timer_max) { estats->nig_timer_max = nig_timer_max; BLOGE(sc, "invalid NIG timer max (%u)\n", estats->nig_timer_max); } } return (0); } static int bxe_storm_stats_validate_counters(struct bxe_softc *sc) { struct stats_counter *counters = &sc->fw_stats_data->storm_counters; uint16_t cur_stats_counter; /* * Make sure we use the value of the counter * used for sending the last stats ramrod. */ BXE_STATS_LOCK(sc); cur_stats_counter = (sc->stats_counter - 1); BXE_STATS_UNLOCK(sc); /* are storm stats valid? 
*/ if (le16toh(counters->xstats_counter) != cur_stats_counter) { BLOGD(sc, DBG_STATS, "stats not updated by xstorm, " "counter 0x%x != stats_counter 0x%x\n", le16toh(counters->xstats_counter), sc->stats_counter); return (-EAGAIN); } if (le16toh(counters->ustats_counter) != cur_stats_counter) { BLOGD(sc, DBG_STATS, "stats not updated by ustorm, " "counter 0x%x != stats_counter 0x%x\n", le16toh(counters->ustats_counter), sc->stats_counter); return (-EAGAIN); } if (le16toh(counters->cstats_counter) != cur_stats_counter) { BLOGD(sc, DBG_STATS, "stats not updated by cstorm, " "counter 0x%x != stats_counter 0x%x\n", le16toh(counters->cstats_counter), sc->stats_counter); return (-EAGAIN); } if (le16toh(counters->tstats_counter) != cur_stats_counter) { BLOGD(sc, DBG_STATS, "stats not updated by tstorm, " "counter 0x%x != stats_counter 0x%x\n", le16toh(counters->tstats_counter), sc->stats_counter); return (-EAGAIN); } return (0); } static int bxe_storm_stats_update(struct bxe_softc *sc) { struct tstorm_per_port_stats *tport = &sc->fw_stats_data->port.tstorm_port_statistics; struct tstorm_per_pf_stats *tfunc = &sc->fw_stats_data->pf.tstorm_pf_statistics; struct host_func_stats *fstats = &sc->func_stats; struct bxe_eth_stats *estats = &sc->eth_stats; struct bxe_eth_stats_old *estats_old = &sc->eth_stats_old; int i; /* vfs stat counter is managed by pf */ if (IS_PF(sc) && bxe_storm_stats_validate_counters(sc)) { return (-EAGAIN); } estats->error_bytes_received_hi = 0; estats->error_bytes_received_lo = 0; for (i = 0; i < sc->num_queues; i++) { struct bxe_fastpath *fp = &sc->fp[i]; struct tstorm_per_queue_stats *tclient = &sc->fw_stats_data->queue_stats[i].tstorm_queue_statistics; struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; struct ustorm_per_queue_stats *uclient = &sc->fw_stats_data->queue_stats[i].ustorm_queue_statistics; struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; struct xstorm_per_queue_stats *xclient = &sc->fw_stats_data->queue_stats[i].xstorm_queue_statistics; struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; struct bxe_eth_q_stats *qstats = &fp->eth_q_stats; struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; uint32_t diff; BLOGD(sc, DBG_STATS, "queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x\n", i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); BLOGD(sc, DBG_STATS, "---------------\n"); UPDATE_QSTAT(tclient->rcv_bcast_bytes, total_broadcast_bytes_received); UPDATE_QSTAT(tclient->rcv_mcast_bytes, total_multicast_bytes_received); UPDATE_QSTAT(tclient->rcv_ucast_bytes, total_unicast_bytes_received); /* * sum to total_bytes_received all * unicast/multicast/broadcast */ qstats->total_bytes_received_hi = qstats->total_broadcast_bytes_received_hi; qstats->total_bytes_received_lo = qstats->total_broadcast_bytes_received_lo; ADD_64(qstats->total_bytes_received_hi, qstats->total_multicast_bytes_received_hi, qstats->total_bytes_received_lo, qstats->total_multicast_bytes_received_lo); ADD_64(qstats->total_bytes_received_hi, qstats->total_unicast_bytes_received_hi, qstats->total_bytes_received_lo, qstats->total_unicast_bytes_received_lo); qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi; qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); 
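/*
 * NOTE: unlike the MAC counters above, the per-queue storm counters
 * are free-running little-endian 32-bit values in the FW statistics
 * buffer, so the UPDATE_EXTEND_*STAT() macros keep an old_*client
 * copy per queue and fold in the wrapped delta; roughly:
 *
 *   diff = le32toh(tclient->s) - le32toh(old_tclient->s);
 *   old_tclient->s = tclient->s;
 *   ADD_EXTEND_64(qstats->t_hi, qstats->t_lo, diff);
 *
 * Unsigned arithmetic keeps the subtraction correct across a 32-bit
 * wrap; the exact definitions are in bxe_stats.h.
 */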
UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, etherstatsoverrsizepkts, 32); UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); SUB_EXTEND_USTAT(mcast_no_buff_pkts, total_multicast_packets_received); SUB_EXTEND_USTAT(bcast_no_buff_pkts, total_broadcast_packets_received); UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); UPDATE_QSTAT(xclient->bcast_bytes_sent, total_broadcast_bytes_transmitted); UPDATE_QSTAT(xclient->mcast_bytes_sent, total_multicast_bytes_transmitted); UPDATE_QSTAT(xclient->ucast_bytes_sent, total_unicast_bytes_transmitted); /* * sum to total_bytes_transmitted all * unicast/multicast/broadcast */ qstats->total_bytes_transmitted_hi = qstats->total_unicast_bytes_transmitted_hi; qstats->total_bytes_transmitted_lo = qstats->total_unicast_bytes_transmitted_lo; ADD_64(qstats->total_bytes_transmitted_hi, qstats->total_broadcast_bytes_transmitted_hi, qstats->total_bytes_transmitted_lo, qstats->total_broadcast_bytes_transmitted_lo); ADD_64(qstats->total_bytes_transmitted_hi, qstats->total_multicast_bytes_transmitted_hi, qstats->total_bytes_transmitted_lo, qstats->total_multicast_bytes_transmitted_lo); UPDATE_EXTEND_XSTAT(ucast_pkts_sent, total_unicast_packets_transmitted); UPDATE_EXTEND_XSTAT(mcast_pkts_sent, total_multicast_packets_transmitted); UPDATE_EXTEND_XSTAT(bcast_pkts_sent, total_broadcast_packets_transmitted); UPDATE_EXTEND_TSTAT(checksum_discard, total_packets_received_checksum_discarded); UPDATE_EXTEND_TSTAT(ttl0_discard, total_packets_received_ttl0_discarded); UPDATE_EXTEND_XSTAT(error_drop_pkts, total_transmitted_dropped_packets_error); /* TPA aggregations completed */ UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); /* Number of network frames aggregated by TPA */ UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames); /* Total number of bytes in completed TPA aggregations */ UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes); UPDATE_ESTAT_QSTAT_64(total_tpa_bytes); UPDATE_FSTAT_QSTAT(total_bytes_received); UPDATE_FSTAT_QSTAT(total_bytes_transmitted); UPDATE_FSTAT_QSTAT(total_unicast_packets_received); UPDATE_FSTAT_QSTAT(total_multicast_packets_received); UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); UPDATE_FSTAT_QSTAT(valid_bytes_received); } ADD_64(estats->total_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, estats->total_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); ADD_64_LE(estats->total_bytes_received_hi, tfunc->rcv_error_bytes.hi, estats->total_bytes_received_lo, tfunc->rcv_error_bytes.lo); ADD_64_LE(estats->error_bytes_received_hi, tfunc->rcv_error_bytes.hi, estats->error_bytes_received_lo, tfunc->rcv_error_bytes.lo); UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); ADD_64(estats->error_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, estats->error_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); if (sc->port.pmf) { struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old; UPDATE_FW_STAT(mac_filter_discard); UPDATE_FW_STAT(mf_tag_discard); UPDATE_FW_STAT(brb_truncate_discard); UPDATE_FW_STAT(mac_discard); } fstats->host_func_stats_start = ++fstats->host_func_stats_end; sc->stats_pending = 0; 
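/*
 * NOTE (assumption): the host_func_stats_start/_end increment-and-copy
 * above looks like a seqlock-style marker pair -- the consumer of this
 * block (the management FW) can treat a snapshot as consistent only
 * when both counters match, which the single assignment guarantees
 * from the driver side.
 */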
return (0); } static void bxe_net_stats_update(struct bxe_softc *sc) { for (int i = 0; i < sc->num_queues; i++) if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, le32toh(sc->fp[i].old_tclient.checksum_discard)); } uint64_t bxe_get_counter(if_t ifp, ift_counter cnt) { struct bxe_softc *sc; struct bxe_eth_stats *estats; sc = if_getsoftc(ifp); estats = &sc->eth_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (bxe_hilo(&estats->total_unicast_packets_received_hi) + bxe_hilo(&estats->total_multicast_packets_received_hi) + bxe_hilo(&estats->total_broadcast_packets_received_hi)); case IFCOUNTER_OPACKETS: return (bxe_hilo(&estats->total_unicast_packets_transmitted_hi) + bxe_hilo(&estats->total_multicast_packets_transmitted_hi) + bxe_hilo(&estats->total_broadcast_packets_transmitted_hi)); case IFCOUNTER_IBYTES: return (bxe_hilo(&estats->total_bytes_received_hi)); case IFCOUNTER_OBYTES: return (bxe_hilo(&estats->total_bytes_transmitted_hi)); case IFCOUNTER_IERRORS: return (bxe_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + bxe_hilo(&estats->etherstatsoverrsizepkts_hi) + bxe_hilo(&estats->brb_drop_hi) + bxe_hilo(&estats->brb_truncate_hi) + bxe_hilo(&estats->rx_stat_dot3statsfcserrors_hi) + bxe_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi) + bxe_hilo(&estats->no_buff_discard_hi)); case IFCOUNTER_OERRORS: return (bxe_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi) + bxe_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi)); case IFCOUNTER_IMCASTS: return (bxe_hilo(&estats->total_multicast_packets_received_hi)); case IFCOUNTER_COLLISIONS: return (bxe_hilo(&estats->tx_stat_etherstatscollisions_hi) + bxe_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + bxe_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi)); default: return (if_get_counter_default(ifp, cnt)); } } static void bxe_drv_stats_update(struct bxe_softc *sc) { struct bxe_eth_stats *estats = &sc->eth_stats; int i; for (i = 0; i < sc->num_queues; i++) { struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats; struct bxe_eth_q_stats_old *qstats_old = &sc->fp[i].eth_q_stats_old; UPDATE_ESTAT_QSTAT(rx_calls); UPDATE_ESTAT_QSTAT(rx_pkts); UPDATE_ESTAT_QSTAT(rx_tpa_pkts); UPDATE_ESTAT_QSTAT(rx_erroneous_jumbo_sge_pkts); UPDATE_ESTAT_QSTAT(rx_bxe_service_rxsgl); UPDATE_ESTAT_QSTAT(rx_jumbo_sge_pkts); UPDATE_ESTAT_QSTAT(rx_soft_errors); UPDATE_ESTAT_QSTAT(rx_hw_csum_errors); UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_ip); UPDATE_ESTAT_QSTAT(rx_ofld_frames_csum_tcp_udp); UPDATE_ESTAT_QSTAT(rx_budget_reached); UPDATE_ESTAT_QSTAT(tx_pkts); UPDATE_ESTAT_QSTAT(tx_soft_errors); UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_ip); UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_tcp); UPDATE_ESTAT_QSTAT(tx_ofld_frames_csum_udp); UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso); UPDATE_ESTAT_QSTAT(tx_ofld_frames_lso_hdr_splits); UPDATE_ESTAT_QSTAT(tx_encap_failures); UPDATE_ESTAT_QSTAT(tx_hw_queue_full); UPDATE_ESTAT_QSTAT(tx_hw_max_queue_depth); UPDATE_ESTAT_QSTAT(tx_dma_mapping_failure); UPDATE_ESTAT_QSTAT(tx_max_drbr_queue_depth); UPDATE_ESTAT_QSTAT(tx_window_violation_std); UPDATE_ESTAT_QSTAT(tx_window_violation_tso); //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_ipv6); //UPDATE_ESTAT_QSTAT(tx_unsupported_tso_request_not_tcp); UPDATE_ESTAT_QSTAT(tx_chain_lost_mbuf); UPDATE_ESTAT_QSTAT(tx_frames_deferred); UPDATE_ESTAT_QSTAT(tx_queue_xoff); /* mbuf driver statistics */ UPDATE_ESTAT_QSTAT(mbuf_defrag_attempts); UPDATE_ESTAT_QSTAT(mbuf_defrag_failures); UPDATE_ESTAT_QSTAT(mbuf_rx_bd_alloc_failed); UPDATE_ESTAT_QSTAT(mbuf_rx_bd_mapping_failed); 
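/*
 * NOTE: the UPDATE_ESTAT_QSTAT() lines here fold the per-queue
 * software counters into the aggregate sc->eth_stats; the *_old
 * copies (saved by bxe_save_statistics() across a chip reset) are
 * presumably folded in as well so the totals stay monotonic across a
 * reinit -- see bxe_stats.h for the exact macro. Relatedly,
 * bxe_get_counter() above converts each {hi,lo} pair with bxe_hilo(),
 * which relies on the _hi word immediately preceding _lo in the stats
 * structs; a sketch, modulo the exact 32/64-bit guard used in bxe.c:
 *
 *   uint32_t lo = *(hiref + 1);
 *   uint32_t hi = *hiref;
 *   return (((uint64_t)hi << 32) + lo);
 */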
UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_alloc_failed); UPDATE_ESTAT_QSTAT(mbuf_rx_tpa_mapping_failed); UPDATE_ESTAT_QSTAT(mbuf_rx_sge_alloc_failed); UPDATE_ESTAT_QSTAT(mbuf_rx_sge_mapping_failed); /* track the number of allocated mbufs */ UPDATE_ESTAT_QSTAT(mbuf_alloc_tx); UPDATE_ESTAT_QSTAT(mbuf_alloc_rx); UPDATE_ESTAT_QSTAT(mbuf_alloc_sge); UPDATE_ESTAT_QSTAT(mbuf_alloc_tpa); } } static uint8_t bxe_edebug_stats_stopped(struct bxe_softc *sc) { uint32_t val; if (SHMEM2_HAS(sc, edebug_driver_if[1])) { val = SHMEM2_RD(sc, edebug_driver_if[1]); if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) { return (TRUE); } } return (FALSE); } static void bxe_stats_update(struct bxe_softc *sc) { uint32_t *stats_comp = BXE_SP(sc, stats_comp); if (bxe_edebug_stats_stopped(sc)) { return; } if (IS_PF(sc)) { if (*stats_comp != DMAE_COMP_VAL) { return; } if (sc->port.pmf) { bxe_hw_stats_update(sc); } if (bxe_storm_stats_update(sc)) { if (sc->stats_pending++ == 3) { if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { - if(sc->trigger_grcdump) { - /* taking grcdump */ - bxe_grc_dump(sc); - } - atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); - taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); + atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); + taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); } } return; } } else { /* * VF doesn't collect HW statistics, and doesn't get completions, * performs only update. */ bxe_storm_stats_update(sc); } bxe_net_stats_update(sc); bxe_drv_stats_update(sc); /* vf is done */ if (IS_VF(sc)) { return; } bxe_hw_stats_post(sc); bxe_storm_stats_post(sc); } static void bxe_port_stats_stop(struct bxe_softc *sc) { struct dmae_cmd *dmae; uint32_t opcode; int loader_idx = PMF_DMAE_C(sc); uint32_t *stats_comp = BXE_SP(sc, stats_comp); sc->executer_idx = 0; opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, FALSE, 0); if (sc->port.port_stx) { dmae = BXE_SP(sc, dmae[sc->executer_idx++]); if (sc->func_stx) { dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); } else { dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); } dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats)); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats)); dmae->dst_addr_lo = sc->port.port_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = bxe_get_port_stats_dma_len(sc); if (sc->func_stx) { dmae->comp_addr_lo = (dmae_reg_go_c[loader_idx] >> 2); dmae->comp_addr_hi = 0; dmae->comp_val = 1; } else { dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } } if (sc->func_stx) { dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = bxe_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, func_stats)); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, func_stats)); dmae->dst_addr_lo = (sc->func_stx >> 2); dmae->dst_addr_hi = 0; dmae->len = (sizeof(struct host_func_stats) >> 2); dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } } static void bxe_stats_stop(struct bxe_softc *sc) { uint8_t update = FALSE; bxe_stats_comp(sc); if (sc->port.pmf) { update = bxe_hw_stats_update(sc) == 0; } update |= bxe_storm_stats_update(sc) == 0; if (update) { bxe_net_stats_update(sc); if (sc->port.pmf) { bxe_port_stats_stop(sc); } bxe_hw_stats_post(sc); bxe_stats_comp(sc); } } static void bxe_stats_do_nothing(struct bxe_softc *sc) { 
return; } static const struct { void (*action)(struct bxe_softc *sc); enum bxe_stats_state next_state; } bxe_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { { /* DISABLED PMF */ { bxe_stats_pmf_update, STATS_STATE_DISABLED }, /* LINK_UP */ { bxe_stats_start, STATS_STATE_ENABLED }, /* UPDATE */ { bxe_stats_do_nothing, STATS_STATE_DISABLED }, /* STOP */ { bxe_stats_do_nothing, STATS_STATE_DISABLED } }, { /* ENABLED PMF */ { bxe_stats_pmf_start, STATS_STATE_ENABLED }, /* LINK_UP */ { bxe_stats_restart, STATS_STATE_ENABLED }, /* UPDATE */ { bxe_stats_update, STATS_STATE_ENABLED }, /* STOP */ { bxe_stats_stop, STATS_STATE_DISABLED } } }; void bxe_stats_handle(struct bxe_softc *sc, enum bxe_stats_event event) { enum bxe_stats_state state; if (__predict_false(sc->panic)) { return; } BXE_STATS_LOCK(sc); state = sc->stats_state; sc->stats_state = bxe_stats_stm[state][event].next_state; BXE_STATS_UNLOCK(sc); bxe_stats_stm[state][event].action(sc); if (event != STATS_EVENT_UPDATE) { BLOGD(sc, DBG_STATS, "state %d -> event %d -> state %d\n", state, event, sc->stats_state); } } static void bxe_port_stats_base_init(struct bxe_softc *sc) { struct dmae_cmd *dmae; uint32_t *stats_comp = BXE_SP(sc, stats_comp); /* sanity */ if (!sc->port.pmf || !sc->port.port_stx) { BLOGE(sc, "BUG!\n"); return; } sc->executer_idx = 0; dmae = BXE_SP(sc, dmae[sc->executer_idx++]); dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC, TRUE, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, port_stats)); dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, port_stats)); dmae->dst_addr_lo = (sc->port.port_stx >> 2); dmae->dst_addr_hi = 0; dmae->len = bxe_get_port_stats_dma_len(sc); dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; bxe_hw_stats_post(sc); bxe_stats_comp(sc); } /* * This function will prepare the statistics ramrod data so that * we will only have to increment the statistics counter and * send the ramrod each time we have to. */ static void bxe_prep_fw_stats_req(struct bxe_softc *sc) { int i; int first_queue_query_index; struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr; bus_addr_t cur_data_offset; struct stats_query_entry *cur_query_entry; stats_hdr->cmd_num = sc->fw_stats_num; stats_hdr->drv_stats_counter = 0; /* * The storm_counters struct contains the counters of completed * statistics requests per storm which are incremented by FW * each time it completes handling a statistics ramrod. We will * check these counters in the timer handler and discard a * stale (statistics) ramrod completion. */ cur_data_offset = (sc->fw_stats_data_mapping + offsetof(struct bxe_fw_stats_data, storm_counters)); stats_hdr->stats_counters_addrs.hi = htole32(U64_HI(cur_data_offset)); stats_hdr->stats_counters_addrs.lo = htole32(U64_LO(cur_data_offset)); /* * Prepare the first stats ramrod (will be completed with * the counters equal to zero) - init counters to something different.
*/ memset(&sc->fw_stats_data->storm_counters, 0xff, sizeof(struct stats_counter)); /**** Port FW statistics data ****/ cur_data_offset = (sc->fw_stats_data_mapping + offsetof(struct bxe_fw_stats_data, port)); cur_query_entry = &sc->fw_stats_req->query[BXE_PORT_QUERY_IDX]; cur_query_entry->kind = STATS_TYPE_PORT; /* For port query index is a DONT CARE */ cur_query_entry->index = SC_PORT(sc); /* For port query funcID is a DONT CARE */ cur_query_entry->funcID = htole16(SC_FUNC(sc)); cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); /**** PF FW statistics data ****/ cur_data_offset = (sc->fw_stats_data_mapping + offsetof(struct bxe_fw_stats_data, pf)); cur_query_entry = &sc->fw_stats_req->query[BXE_PF_QUERY_IDX]; cur_query_entry->kind = STATS_TYPE_PF; /* For PF query index is a DONT CARE */ cur_query_entry->index = SC_PORT(sc); cur_query_entry->funcID = htole16(SC_FUNC(sc)); cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); /**** Clients' queries ****/ cur_data_offset = (sc->fw_stats_data_mapping + offsetof(struct bxe_fw_stats_data, queue_stats)); /* * First queue query index depends on whether FCoE offloaded request will * be included in the ramrod */ first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1); for (i = 0; i < sc->num_queues; i++) { cur_query_entry = &sc->fw_stats_req->query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; cur_query_entry->index = bxe_stats_id(&sc->fp[i]); cur_query_entry->funcID = htole16(SC_FUNC(sc)); cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); cur_data_offset += sizeof(struct per_queue_stats); } } void bxe_stats_init(struct bxe_softc *sc) { int /*abs*/port = SC_PORT(sc); int mb_idx = SC_FW_MB_IDX(sc); int i; sc->stats_pending = 0; sc->executer_idx = 0; sc->stats_counter = 0; /* port and func stats for management */ if (!BXE_NOMCP(sc)) { sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx); sc->func_stx = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_param); } else { sc->port.port_stx = 0; sc->func_stx = 0; } BLOGD(sc, DBG_STATS, "port_stx 0x%x func_stx 0x%x\n", sc->port.port_stx, sc->func_stx); /* pmf should retrieve port statistics from SP on a non-init */ if (!sc->stats_init && sc->port.pmf && sc->port.port_stx) { bxe_stats_handle(sc, STATS_EVENT_PMF); } port = SC_PORT(sc); /* port stats */ memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats)); sc->port.old_nig_stats.brb_discard = REG_RD(sc, NIG_REG_STAT0_BRB_DISCARD + port*0x38); sc->port.old_nig_stats.brb_truncate = REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); if (!CHIP_IS_E3(sc)) { REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); } /* function stats */ for (i = 0; i < sc->num_queues; i++) { memset(&sc->fp[i].old_tclient, 0, sizeof(sc->fp[i].old_tclient)); memset(&sc->fp[i].old_uclient, 0, sizeof(sc->fp[i].old_uclient)); memset(&sc->fp[i].old_xclient, 0, sizeof(sc->fp[i].old_xclient)); if (sc->stats_init) { memset(&sc->fp[i].eth_q_stats, 0, sizeof(sc->fp[i].eth_q_stats)); memset(&sc->fp[i].eth_q_stats_old, 0, sizeof(sc->fp[i].eth_q_stats_old)); } } /* prepare statistics ramrod data */ bxe_prep_fw_stats_req(sc); if (sc->stats_init) { memset(&sc->net_stats_old, 0,
sizeof(sc->net_stats_old)); memset(&sc->fw_stats_old, 0, sizeof(sc->fw_stats_old)); memset(&sc->eth_stats_old, 0, sizeof(sc->eth_stats_old)); memset(&sc->eth_stats, 0, sizeof(sc->eth_stats)); memset(&sc->func_stats, 0, sizeof(sc->func_stats)); /* Clean SP from previous statistics */ if (sc->func_stx) { memset(BXE_SP(sc, func_stats), 0, sizeof(struct host_func_stats)); bxe_func_stats_init(sc); bxe_hw_stats_post(sc); bxe_stats_comp(sc); } } sc->stats_state = STATS_STATE_DISABLED; if (sc->port.pmf && sc->port.port_stx) { bxe_port_stats_base_init(sc); } /* mark the end of statistics initialization */ sc->stats_init = FALSE; } void bxe_save_statistics(struct bxe_softc *sc) { int i; /* save queue statistics */ for (i = 0; i < sc->num_queues; i++) { struct bxe_fastpath *fp = &sc->fp[i]; struct bxe_eth_q_stats *qstats = &fp->eth_q_stats; struct bxe_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); UPDATE_QSTAT_OLD(total_tpa_bytes_hi); UPDATE_QSTAT_OLD(total_tpa_bytes_lo); } /* store port firmware statistics */ if (sc->port.pmf) { struct bxe_eth_stats *estats = &sc->eth_stats; struct bxe_fw_port_stats_old *fwstats = &sc->fw_stats_old; struct host_port_stats *pstats = BXE_SP(sc, port_stats); fwstats->pfc_frames_rx_hi = pstats->pfc_frames_rx_hi; fwstats->pfc_frames_rx_lo = pstats->pfc_frames_rx_lo; fwstats->pfc_frames_tx_hi = pstats->pfc_frames_tx_hi; fwstats->pfc_frames_tx_lo = pstats->pfc_frames_tx_lo; if (IS_MF(sc)) { UPDATE_FW_STAT_OLD(mac_filter_discard); UPDATE_FW_STAT_OLD(mf_tag_discard); UPDATE_FW_STAT_OLD(brb_truncate_discard); UPDATE_FW_STAT_OLD(mac_discard); } } } void bxe_afex_collect_stats(struct bxe_softc *sc, void *void_afex_stats, uint32_t stats_type) { int i; struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; struct bxe_eth_stats *estats = &sc->eth_stats; memset(afex_stats, 0, sizeof(struct afex_stats)); for (i = 0; i < sc->num_queues; i++) { struct bxe_eth_q_stats *qstats = &sc->fp[i].eth_q_stats; ADD_64(afex_stats->rx_unicast_bytes_hi, qstats->total_unicast_bytes_received_hi, afex_stats->rx_unicast_bytes_lo, qstats->total_unicast_bytes_received_lo); ADD_64(afex_stats->rx_broadcast_bytes_hi, qstats->total_broadcast_bytes_received_hi, afex_stats->rx_broadcast_bytes_lo, qstats->total_broadcast_bytes_received_lo); ADD_64(afex_stats->rx_multicast_bytes_hi, qstats->total_multicast_bytes_received_hi, afex_stats->rx_multicast_bytes_lo, qstats->total_multicast_bytes_received_lo); ADD_64(afex_stats->rx_unicast_frames_hi, qstats->total_unicast_packets_received_hi, afex_stats->rx_unicast_frames_lo, qstats->total_unicast_packets_received_lo); ADD_64(afex_stats->rx_broadcast_frames_hi, qstats->total_broadcast_packets_received_hi, afex_stats->rx_broadcast_frames_lo, qstats->total_broadcast_packets_received_lo); ADD_64(afex_stats->rx_multicast_frames_hi, qstats->total_multicast_packets_received_hi, afex_stats->rx_multicast_frames_lo,
qstats->total_multicast_packets_received_lo); /* * sum to rx_frames_discarded all discarded * packets due to size, ttl0 and checksum */ ADD_64(afex_stats->rx_frames_discarded_hi, qstats->total_packets_received_checksum_discarded_hi, afex_stats->rx_frames_discarded_lo, qstats->total_packets_received_checksum_discarded_lo); ADD_64(afex_stats->rx_frames_discarded_hi, qstats->total_packets_received_ttl0_discarded_hi, afex_stats->rx_frames_discarded_lo, qstats->total_packets_received_ttl0_discarded_lo); ADD_64(afex_stats->rx_frames_discarded_hi, qstats->etherstatsoverrsizepkts_hi, afex_stats->rx_frames_discarded_lo, qstats->etherstatsoverrsizepkts_lo); ADD_64(afex_stats->rx_frames_dropped_hi, qstats->no_buff_discard_hi, afex_stats->rx_frames_dropped_lo, qstats->no_buff_discard_lo); ADD_64(afex_stats->tx_unicast_bytes_hi, qstats->total_unicast_bytes_transmitted_hi, afex_stats->tx_unicast_bytes_lo, qstats->total_unicast_bytes_transmitted_lo); ADD_64(afex_stats->tx_broadcast_bytes_hi, qstats->total_broadcast_bytes_transmitted_hi, afex_stats->tx_broadcast_bytes_lo, qstats->total_broadcast_bytes_transmitted_lo); ADD_64(afex_stats->tx_multicast_bytes_hi, qstats->total_multicast_bytes_transmitted_hi, afex_stats->tx_multicast_bytes_lo, qstats->total_multicast_bytes_transmitted_lo); ADD_64(afex_stats->tx_unicast_frames_hi, qstats->total_unicast_packets_transmitted_hi, afex_stats->tx_unicast_frames_lo, qstats->total_unicast_packets_transmitted_lo); ADD_64(afex_stats->tx_broadcast_frames_hi, qstats->total_broadcast_packets_transmitted_hi, afex_stats->tx_broadcast_frames_lo, qstats->total_broadcast_packets_transmitted_lo); ADD_64(afex_stats->tx_multicast_frames_hi, qstats->total_multicast_packets_transmitted_hi, afex_stats->tx_multicast_frames_lo, qstats->total_multicast_packets_transmitted_lo); ADD_64(afex_stats->tx_frames_dropped_hi, qstats->total_transmitted_dropped_packets_error_hi, afex_stats->tx_frames_dropped_lo, qstats->total_transmitted_dropped_packets_error_lo); } /* * If port stats are requested, add them to the PMF * stats, as they will anyway be accumulated by the * MCP before being sent to the switch */ if ((sc->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { ADD_64(afex_stats->rx_frames_dropped_hi, 0, afex_stats->rx_frames_dropped_lo, estats->mac_filter_discard); ADD_64(afex_stats->rx_frames_dropped_hi, 0, afex_stats->rx_frames_dropped_lo, estats->brb_truncate_discard); ADD_64(afex_stats->rx_frames_discarded_hi, 0, afex_stats->rx_frames_discarded_lo, estats->mac_discard); } } Index: projects/release-pkg/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c =================================================================== --- projects/release-pkg/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c (revision 297926) +++ projects/release-pkg/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c (revision 297927) @@ -1,2904 +1,2897 @@ /*- * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2004-2006 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_net_vsc.h" #include "hv_rndis.h" #include "hv_rndis_filter.h" #define hv_chan_rxr hv_chan_priv1 #define hv_chan_txr hv_chan_priv2 /* Short for Hyper-V network interface */ #define NETVSC_DEVNAME "hn" /* * It looks like offset 0 of buf is reserved to hold the softc pointer. * The sc pointer evidently not needed, and is not presently populated. * The packet offset is where the netvsc_packet starts in the buffer. 
#define HV_NV_SC_PTR_OFFSET_IN_BUF 0 #define HV_NV_PACKET_OFFSET_IN_BUF 16 /* YYY should get it from the underlying channel */ #define HN_TX_DESC_CNT 512 #define HN_LROENT_CNT_DEF 128 #define HN_RING_CNT_DEF_MAX 8 #define HN_RNDIS_MSG_LEN \ (sizeof(rndis_msg) + \ RNDIS_HASH_PPI_SIZE + \ RNDIS_VLAN_PPI_SIZE + \ RNDIS_TSO_PPI_SIZE + \ RNDIS_CSUM_PPI_SIZE) #define HN_RNDIS_MSG_BOUNDARY PAGE_SIZE #define HN_RNDIS_MSG_ALIGN CACHE_LINE_SIZE #define HN_TX_DATA_BOUNDARY PAGE_SIZE #define HN_TX_DATA_MAXSIZE IP_MAXPACKET #define HN_TX_DATA_SEGSIZE PAGE_SIZE #define HN_TX_DATA_SEGCNT_MAX \ (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS) #define HN_DIRECT_TX_SIZE_DEF 128 struct hn_txdesc { #ifndef HN_USE_TXDESC_BUFRING SLIST_ENTRY(hn_txdesc) link; #endif struct mbuf *m; struct hn_tx_ring *txr; int refs; uint32_t flags; /* HN_TXD_FLAG_ */ netvsc_packet netvsc_pkt; /* XXX to be removed */ bus_dmamap_t data_dmap; bus_addr_t rndis_msg_paddr; rndis_msg *rndis_msg; bus_dmamap_t rndis_msg_dmap; }; #define HN_TXD_FLAG_ONLIST 0x1 #define HN_TXD_FLAG_DMAMAP 0x2 /* * Only enable UDP checksum offloading when it is on 2012R2 or * later. UDP checksum offloading doesn't work on earlier * Windows releases. */ #define HN_CSUM_ASSIST_WIN8 (CSUM_IP | CSUM_TCP) #define HN_CSUM_ASSIST (CSUM_IP | CSUM_UDP | CSUM_TCP) #define HN_LRO_LENLIM_MULTIRX_DEF (12 * ETHERMTU) #define HN_LRO_LENLIM_DEF (25 * ETHERMTU) /* YYY 2*MTU is a bit rough, but should be good enough. */ #define HN_LRO_LENLIM_MIN(ifp) (2 * (ifp)->if_mtu) #define HN_LRO_ACKCNT_DEF 1 /* * Be aware that this sleepable mutex will exhibit WITNESS errors when * certain TCP and ARP code paths are taken. This appears to be a * well-known condition, as all other drivers checked use a sleeping * mutex to protect their transmit paths. * Also be aware that mutexes do not play well with semaphores, and there * is a conflicting semaphore in a certain channel code path. */ #define NV_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF) #define NV_LOCK(_sc) mtx_lock(&(_sc)->hn_lock) #define NV_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->hn_lock, MA_OWNED) #define NV_UNLOCK(_sc) mtx_unlock(&(_sc)->hn_lock) #define NV_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->hn_lock) /* * Globals */ int hv_promisc_mode = 0; /* normal mode by default */ SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface"); /* Trust tcp segment verification on host side. */ static int hn_trust_hosttcp = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN, &hn_trust_hosttcp, 0, "Trust tcp segment verification on host side, " "when csum info is missing (global setting)"); /* Trust udp datagram verification on host side. */ static int hn_trust_hostudp = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN, &hn_trust_hostudp, 0, "Trust udp datagram verification on host side, " "when csum info is missing (global setting)"); /* Trust ip packet verification on host side.
*/ static int hn_trust_hostip = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN, &hn_trust_hostip, 0, "Trust ip packet verification on host side, " "when csum info is missing (global setting)"); #if __FreeBSD_version >= 1100045 /* Limit TSO burst size */ static int hn_tso_maxlen = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN, &hn_tso_maxlen, 0, "TSO burst limit"); #endif /* Limit chimney send size */ static int hn_tx_chimney_size = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN, &hn_tx_chimney_size, 0, "Chimney send packet size limit"); /* Limit the size of packet for direct transmission */ static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF; SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN, &hn_direct_tx_size, 0, "Size of the packet for direct transmission"); #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 static int hn_lro_entry_count = HN_LROENT_CNT_DEF; SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN, &hn_lro_entry_count, 0, "LRO entry count"); #endif #endif static int hn_share_tx_taskq = 0; SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN, &hn_share_tx_taskq, 0, "Enable shared TX taskqueue"); static struct taskqueue *hn_tx_taskq; #ifndef HN_USE_TXDESC_BUFRING static int hn_use_txdesc_bufring = 0; #else static int hn_use_txdesc_bufring = 1; #endif SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD, &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors"); static int hn_bind_tx_taskq = -1; SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN, &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu"); static int hn_use_if_start = 0; SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN, &hn_use_if_start, 0, "Use if_start TX method"); static int hn_chan_cnt = 0; SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN, &hn_chan_cnt, 0, "# of channels to use; each channel has one RX ring and one TX ring"); static int hn_tx_ring_cnt = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN, &hn_tx_ring_cnt, 0, "# of TX rings to use"); static u_int hn_cpu_index; /* * Forward declarations */ static void hn_stop(hn_softc_t *sc); static void hn_ifinit_locked(hn_softc_t *sc); static void hn_ifinit(void *xsc); static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int hn_start_locked(struct hn_tx_ring *txr, int len); static void hn_start(struct ifnet *ifp); static void hn_start_txeof(struct hn_tx_ring *); static int hn_ifmedia_upd(struct ifnet *ifp); static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); #if __FreeBSD_version >= 1100099 static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); #endif static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS); static int hn_check_iplen(const struct mbuf *, int); static int hn_create_tx_ring(struct hn_softc *, int); static void hn_destroy_tx_ring(struct hn_tx_ring *); static int hn_create_tx_data(struct hn_softc *, int); static void hn_destroy_tx_data(struct hn_softc *); static void hn_start_taskfunc(void *, int); static void hn_start_txeof_taskfunc(void *, int); static void hn_stop_tx_tasks(struct hn_softc *); static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, 
struct mbuf **); static void hn_create_rx_data(struct hn_softc *sc, int); static void hn_destroy_rx_data(struct hn_softc *sc); static void hn_set_tx_chimney_size(struct hn_softc *, int); static void hn_channel_attach(struct hn_softc *, struct hv_vmbus_channel *); static int hn_transmit(struct ifnet *, struct mbuf *); static void hn_xmit_qflush(struct ifnet *); static int hn_xmit(struct hn_tx_ring *, int); static void hn_xmit_txeof(struct hn_tx_ring *); static void hn_xmit_taskfunc(void *, int); static void hn_xmit_txeof_taskfunc(void *, int); #if __FreeBSD_version >= 1100099 static void hn_set_lro_lenlim(struct hn_softc *sc, int lenlim) { int i; for (i = 0; i < sc->hn_rx_ring_inuse; ++i) sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim; } #endif static int hn_ifmedia_upd(struct ifnet *ifp __unused) { return EOPNOTSUPP; } static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct hn_softc *sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!sc->hn_carrier) { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_10G_T | IFM_FDX; } /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */ static const hv_guid g_net_vsc_device_type = { .data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46, 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E} }; /* * Standard probe entry point. * */ static int netvsc_probe(device_t dev) { const char *p; p = vmbus_get_type(dev); if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) { device_set_desc(dev, "Synthetic Network Interface"); if (bootverbose) printf("Netvsc probe... DONE \n"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Standard attach entry point. * * Called when the driver is loaded. It allocates needed resources, * and initializes the "hardware" and software. */ static int netvsc_attach(device_t dev) { struct hv_device *device_ctx = vmbus_get_devctx(dev); struct hv_vmbus_channel *chan; netvsc_device_info device_info; hn_softc_t *sc; int unit = device_get_unit(dev); struct ifnet *ifp = NULL; int error, ring_cnt, tx_ring_cnt; #if __FreeBSD_version >= 1100045 int tso_maxlen; #endif sc = device_get_softc(dev); - if (sc == NULL) { - return (ENOMEM); - } bzero(sc, sizeof(hn_softc_t)); sc->hn_unit = unit; sc->hn_dev = dev; if (hn_tx_taskq == NULL) { sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &sc->hn_tx_taskq); if (hn_bind_tx_taskq >= 0) { int cpu = hn_bind_tx_taskq; cpuset_t cpu_set; if (cpu > mp_ncpus - 1) cpu = mp_ncpus - 1; CPU_SETOF(cpu, &cpu_set); taskqueue_start_threads_cpuset(&sc->hn_tx_taskq, 1, PI_NET, &cpu_set, "%s tx", device_get_nameunit(dev)); } else { taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", device_get_nameunit(dev)); } } else { sc->hn_tx_taskq = hn_tx_taskq; } NV_LOCK_INIT(sc, "NetVSCLock"); sc->hn_dev_obj = device_ctx; ifp = sc->hn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* * Figure out the # of RX rings (ring_cnt) and the # of TX rings * to use (tx_ring_cnt). * * NOTE: * The # of RX rings to use is same as the # of channels to use. */ ring_cnt = hn_chan_cnt; if (ring_cnt <= 0) { /* Default */ ring_cnt = mp_ncpus; if (ring_cnt > HN_RING_CNT_DEF_MAX) ring_cnt = HN_RING_CNT_DEF_MAX; } else if (ring_cnt > mp_ncpus) { ring_cnt = mp_ncpus; } tx_ring_cnt = hn_tx_ring_cnt; if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt) tx_ring_cnt = ring_cnt; if (hn_use_if_start) { /* ifnet.if_start only needs one TX ring. 
*/ tx_ring_cnt = 1; } /* * Set the leader CPU for channels. */ sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus; error = hn_create_tx_data(sc, tx_ring_cnt); if (error) goto failed; hn_create_rx_data(sc, ring_cnt); /* * Associate the first TX/RX ring w/ the primary channel. */ chan = device_ctx->channel; KASSERT(HV_VMBUS_CHAN_ISPRIMARY(chan), ("not primary channel")); KASSERT(chan->offer_msg.offer.sub_channel_index == 0, ("primary channel subidx %u", chan->offer_msg.offer.sub_channel_index)); hn_channel_attach(sc, chan); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = hn_ioctl; ifp->if_init = hn_ifinit; /* needed by hv_rf_on_device_add() code */ ifp->if_mtu = ETHERMTU; if (hn_use_if_start) { ifp->if_start = hn_start; IFQ_SET_MAXLEN(&ifp->if_snd, 512); ifp->if_snd.ifq_drv_maxlen = 511; IFQ_SET_READY(&ifp->if_snd); } else { ifp->if_transmit = hn_transmit; ifp->if_qflush = hn_xmit_qflush; } ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts); ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO); /* XXX ifmedia_set really should do this for us */ sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media; /* * Tell upper layers that we support full VLAN capability. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO; ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO; error = hv_rf_on_device_add(device_ctx, &device_info, ring_cnt); if (error) goto failed; KASSERT(sc->net_dev->num_channel > 0 && sc->net_dev->num_channel <= sc->hn_rx_ring_inuse, ("invalid channel count %u, should be less than %d", sc->net_dev->num_channel, sc->hn_rx_ring_inuse)); /* * Set the # of TX/RX rings that could be used according to * the # of channels that host offered. */ if (sc->hn_tx_ring_inuse > sc->net_dev->num_channel) sc->hn_tx_ring_inuse = sc->net_dev->num_channel; sc->hn_rx_ring_inuse = sc->net_dev->num_channel; device_printf(dev, "%d TX ring, %d RX ring\n", sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse); #if __FreeBSD_version >= 1100099 if (sc->hn_rx_ring_inuse > 1) { /* * Reduce TCP segment aggregation limit for multiple * RX rings to increase ACK timeliness. 
*/ hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF); } #endif if (device_info.link_state == 0) { sc->hn_carrier = 1; } #if __FreeBSD_version >= 1100045 tso_maxlen = hn_tso_maxlen; if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET) tso_maxlen = IP_MAXPACKET; ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; ifp->if_hw_tsomax = tso_maxlen - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); #endif ether_ifattach(ifp, device_info.mac_addr); #if __FreeBSD_version >= 1100045 if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax, ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize); #endif sc->hn_tx_chimney_max = sc->net_dev->send_section_size; hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max); if (hn_tx_chimney_size > 0 && hn_tx_chimney_size < sc->hn_tx_chimney_max) hn_set_tx_chimney_size(sc, hn_tx_chimney_size); return (0); failed: hn_destroy_tx_data(sc); if (ifp != NULL) if_free(ifp); return (error); } /* * Standard detach entry point */ static int netvsc_detach(device_t dev) { struct hn_softc *sc = device_get_softc(dev); struct hv_device *hv_device = vmbus_get_devctx(dev); if (bootverbose) printf("netvsc_detach\n"); /* * XXXKYS: Need to clean up all our * driver state; this is the driver * unloading. */ /* * XXXKYS: Need to stop outgoing traffic and unregister * the netdevice. */ hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL); hn_stop_tx_tasks(sc); ifmedia_removeall(&sc->hn_media); hn_destroy_rx_data(sc); hn_destroy_tx_data(sc); if (sc->hn_tx_taskq != hn_tx_taskq) taskqueue_free(sc->hn_tx_taskq); return (0); } /* * Standard shutdown entry point */ static int netvsc_shutdown(device_t dev) { return (0); } static __inline int hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs) { struct mbuf *m = *m_head; int error; error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { struct mbuf *m_new; m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX); if (m_new == NULL) return ENOBUFS; else *m_head = m = m_new; txr->hn_tx_collapsed++; error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); } if (!error) { bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_PREWRITE); txd->flags |= HN_TXD_FLAG_DMAMAP; } return error; } static __inline void hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd) { if (txd->flags & HN_TXD_FLAG_DMAMAP) { bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->hn_tx_data_dtag, txd->data_dmap); txd->flags &= ~HN_TXD_FLAG_DMAMAP; } } static __inline int hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) { KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0, ("put an onlist txd %#x", txd->flags)); KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs)); if (atomic_fetchadd_int(&txd->refs, -1) != 1) return 0; hn_txdesc_dmamap_unload(txr, txd); if (txd->m != NULL) { m_freem(txd->m); txd->m = NULL; } txd->flags |= HN_TXD_FLAG_ONLIST; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); KASSERT(txr->hn_txdesc_avail >= 0 && txr->hn_txdesc_avail < txr->hn_txdesc_cnt, ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail)); txr->hn_txdesc_avail++; SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); mtx_unlock_spin(&txr->hn_txlist_spin); #else atomic_add_int(&txr->hn_txdesc_avail, 1); buf_ring_enqueue(txr->hn_txdesc_br, txd); #endif return 1; } static __inline 
struct hn_txdesc * hn_txdesc_get(struct hn_tx_ring *txr) { struct hn_txdesc *txd; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); txd = SLIST_FIRST(&txr->hn_txlist); if (txd != NULL) { KASSERT(txr->hn_txdesc_avail > 0, ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail)); txr->hn_txdesc_avail--; SLIST_REMOVE_HEAD(&txr->hn_txlist, link); } mtx_unlock_spin(&txr->hn_txlist_spin); #else txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); #endif if (txd != NULL) { #ifdef HN_USE_TXDESC_BUFRING atomic_subtract_int(&txr->hn_txdesc_avail, 1); #endif KASSERT(txd->m == NULL && txd->refs == 0 && (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd")); txd->flags &= ~HN_TXD_FLAG_ONLIST; txd->refs = 1; } return txd; } static __inline void hn_txdesc_hold(struct hn_txdesc *txd) { /* 0->1 transition will never work */ KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs)); atomic_add_int(&txd->refs, 1); } static void hn_tx_done(struct hv_vmbus_channel *chan, void *xpkt) { netvsc_packet *packet = xpkt; struct hn_txdesc *txd; struct hn_tx_ring *txr; txd = (struct hn_txdesc *)(uintptr_t) packet->compl.send.send_completion_tid; txr = txd->txr; KASSERT(txr->hn_chan == chan, ("channel mismatch, on channel%u, should be channel%u", chan->offer_msg.offer.sub_channel_index, txr->hn_chan->offer_msg.offer.sub_channel_index)); txr->hn_has_txeof = 1; hn_txdesc_put(txr, txd); } void netvsc_channel_rollup(struct hv_vmbus_channel *chan) { struct hn_tx_ring *txr = chan->hv_chan_txr; #if defined(INET) || defined(INET6) struct hn_rx_ring *rxr = chan->hv_chan_rxr; tcp_lro_flush_all(&rxr->hn_lro); #endif /* * NOTE: * 'txr' could be NULL, if multiple channels and * ifnet.if_start method are enabled. */ if (txr == NULL || !txr->hn_has_txeof) return; txr->hn_has_txeof = 0; txr->hn_txeof(txr); } /* * NOTE: * If this function fails, then both txd and m_head0 will be freed. */ static int hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0) { bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX]; int error, nsegs, i; struct mbuf *m_head = *m_head0; netvsc_packet *packet; rndis_msg *rndis_mesg; rndis_packet *rndis_pkt; rndis_per_packet_info *rppi; struct ndis_hash_info *hash_info; uint32_t rndis_msg_size; packet = &txd->netvsc_pkt; packet->is_data_pkt = TRUE; packet->tot_data_buf_len = m_head->m_pkthdr.len; /* * extension points to the area reserved for the * rndis_filter_packet, which is placed just after * the netvsc_packet (and rppi struct, if present; * length is updated later). */ rndis_mesg = txd->rndis_msg; /* XXX not necessary */ memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN); rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG; rndis_pkt = &rndis_mesg->msg.packet; rndis_pkt->data_offset = sizeof(rndis_packet); rndis_pkt->data_length = packet->tot_data_buf_len; rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet); rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet); /* * Set the hash info for this packet, so that the host could * dispatch the TX done event for this packet back to this TX * ring's channel. 
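 */

/*
 * Usage sketch for the txdesc reference counting above, assuming only the
 * semantics of hn_txdesc_get()/hold()/put() as defined earlier; the
 * function name is hypothetical. hn_txdesc_get() hands out a descriptor
 * holding one reference, an extra hold pins it across the send, and the
 * completion path (hn_tx_done()) drops the final reference.
 */
static int
hn_txd_lifecycle_sketch(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

	txd = hn_txdesc_get(txr);	/* refs: 0 -> 1 */
	if (txd == NULL)
		return (ENOBUFS);
	hn_txdesc_hold(txd);		/* refs: 1 -> 2, pin across the send */
	/* ... hand txd to the host; completion drops one reference ... */
	hn_txdesc_put(txr, txd);	/* drop the local reference */
	return (0);
}

/*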
*/ rndis_msg_size += RNDIS_HASH_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_HASH_PPI_SIZE, nbl_hash_value); hash_info = (struct ndis_hash_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); hash_info->hash = txr->hn_tx_idx; if (m_head->m_flags & M_VLANTAG) { ndis_8021q_info *rppi_vlan_info; rndis_msg_size += RNDIS_VLAN_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE, ieee_8021q_info); rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); rppi_vlan_info->u1.s1.vlan_id = m_head->m_pkthdr.ether_vtag & 0xfff; } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { rndis_tcp_tso_info *tso_info; struct ether_vlan_header *eh; int ether_len; /* * XXX need m_pullup and use mtodo */ eh = mtod(m_head, struct ether_vlan_header*); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; else ether_len = ETHER_HDR_LEN; rndis_msg_size += RNDIS_TSO_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE, tcp_large_send_info); tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); tso_info->lso_v2_xmit.type = RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; #ifdef INET if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) { struct ip *ip = (struct ip *)(m_head->m_data + ether_len); unsigned long iph_len = ip->ip_hl << 2; struct tcphdr *th = (struct tcphdr *)((caddr_t)ip + iph_len); tso_info->lso_v2_xmit.ip_version = RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; ip->ip_len = 0; ip->ip_sum = 0; th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } #endif #if defined(INET6) && defined(INET) else #endif #ifdef INET6 { struct ip6_hdr *ip6 = (struct ip6_hdr *) (m_head->m_data + ether_len); struct tcphdr *th = (struct tcphdr *)(ip6 + 1); tso_info->lso_v2_xmit.ip_version = RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; ip6->ip6_plen = 0; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); } #endif tso_info->lso_v2_xmit.tcp_header_offset = 0; tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz; } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) { rndis_tcp_ip_csum_info *csum_info; rndis_msg_size += RNDIS_CSUM_PPI_SIZE; rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE, tcpip_chksum_info); csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi + rppi->per_packet_info_offset); csum_info->xmit.is_ipv4 = 1; if (m_head->m_pkthdr.csum_flags & CSUM_IP) csum_info->xmit.ip_header_csum = 1; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) { csum_info->xmit.tcp_csum = 1; csum_info->xmit.tcp_header_offset = 0; } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { csum_info->xmit.udp_csum = 1; } } rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size; packet->tot_data_buf_len = rndis_mesg->msg_len; /* * Chimney send, if the packet could fit into one chimney buffer. * * TODO: vRSS, chimney buffer should be per-channel. 
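 */

/*
 * Sketch of the "chimney" (pre-posted send buffer) layout the copy below
 * assumes: the host shares one large buffer carved into fixed-size
 * sections, and a packet that fits is copied into its section (RNDIS
 * message first, then the mbuf data) instead of being described by
 * scatter/gather page buffers. The helper name is hypothetical.
 */
static uint8_t *
hn_chim_section_sketch(uint8_t *send_buf, uint32_t section_size,
    uint32_t idx)
{
	/* Section idx begins at byte offset idx * section_size. */
	return (send_buf + (size_t)idx * section_size);
}

/*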
*/ if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) { netvsc_dev *net_dev = txr->hn_sc->net_dev; uint32_t send_buf_section_idx; send_buf_section_idx = hv_nv_get_next_send_section(net_dev); if (send_buf_section_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) { uint8_t *dest = ((uint8_t *)net_dev->send_buf + (send_buf_section_idx * net_dev->send_section_size)); memcpy(dest, rndis_mesg, rndis_msg_size); dest += rndis_msg_size; m_copydata(m_head, 0, m_head->m_pkthdr.len, dest); packet->send_buf_section_idx = send_buf_section_idx; packet->send_buf_section_size = packet->tot_data_buf_len; packet->page_buf_count = 0; txr->hn_tx_chimney++; goto done; } } error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); if (error) { int freed; /* * This mbuf is not linked w/ the txd yet, so free it now. */ m_freem(m_head); *m_head0 = NULL; freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon txdma error")); txr->hn_txdma_failed++; if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1); return error; } *m_head0 = m_head; packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS; /* send packet with page buffer */ packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr); packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK; packet->page_buffers[0].length = rndis_msg_size; /* * Fill the page buffers with mbuf info starting at index * HV_RF_NUM_TX_RESERVED_PAGE_BUFS. */ for (i = 0; i < nsegs; ++i) { hv_vmbus_page_buffer *pb = &packet->page_buffers[ i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS]; pb->pfn = atop(segs[i].ds_addr); pb->offset = segs[i].ds_addr & PAGE_MASK; pb->length = segs[i].ds_len; } packet->send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX; packet->send_buf_section_size = 0; done: txd->m = m_head; /* Set the completion routine */ packet->compl.send.on_send_completion = hn_tx_done; packet->compl.send.send_completion_context = packet; packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd; return 0; } /* * NOTE: * If this function fails, then txd will be freed, but the mbuf * associated w/ the txd will _not_ be freed. */ static int hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd) { int error, send_failed = 0; again: /* * Make sure that txd is not freed before ETHER_BPF_MTAP. */ hn_txdesc_hold(txd); error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt); if (!error) { ETHER_BPF_MTAP(ifp, txd->m); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (!hn_use_if_start) { if_inc_counter(ifp, IFCOUNTER_OBYTES, txd->m->m_pkthdr.len); if (txd->m->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); } txr->hn_pkts++; } hn_txdesc_put(txr, txd); if (__predict_false(error)) { int freed; /* * This should "really rarely" happen. * * XXX Too many RX to be acked or too many sideband * commands to run? Ask netvsc_channel_rollup() * to kick start later. */ txr->hn_has_txeof = 1; if (!send_failed) { txr->hn_send_failed++; send_failed = 1; /* * Try sending again after set hn_has_txeof; * in case that we missed the last * netvsc_channel_rollup(). */ goto again; } if_printf(ifp, "send failed\n"); /* * Caller will perform further processing on the * associated mbuf, so don't free it in hn_txdesc_put(); * only unload it from the DMA map in hn_txdesc_put(), * if it was loaded. 
*/ txd->m = NULL; freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon send error")); txr->hn_send_failed++; } return error; } /* * Start a transmit of one or more packets */ static int hn_start_locked(struct hn_tx_ring *txr, int len) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; KASSERT(hn_use_if_start, ("hn_start_locked is called, when if_start is disabled")); KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); mtx_assert(&txr->hn_tx_lock, MA_OWNED); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { struct hn_txdesc *txd; struct mbuf *m_head; int error; IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (len > 0 && m_head->m_pkthdr.len > len) { /* * This sending could be time consuming; let callers * dispatch this packet sending (and sending of any * following up packets) to tx taskqueue. */ IFQ_DRV_PREPEND(&ifp->if_snd, m_head); return 1; } txd = hn_txdesc_get(txr); if (txd == NULL) { txr->hn_no_txdescs++; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } error = hn_encap(txr, txd, &m_head); if (error) { /* Both txd and m_head are freed */ continue; } error = hn_send_pkt(ifp, txr, txd); if (__predict_false(error)) { /* txd is freed, but m_head is not */ IFQ_DRV_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } } return 0; } /* * Link up/down notification */ void netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status) { hn_softc_t *sc = device_get_softc(device_obj->device); - - if (sc == NULL) { - return; - } if (status == 1) { sc->hn_carrier = 1; } else { sc->hn_carrier = 0; } } /* * Append the specified data to the indicated mbuf chain, * Extend the mbuf chain if the new data does not fit in * existing space. * * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c. * There should be an equivalent in the kernel mbuf code, * but there does not appear to be one yet. * * Differs from m_append() in that additional mbufs are * allocated with cluster size MJUMPAGESIZE, and filled * accordingly. * * Return 1 if able to complete the job; otherwise 0. */ static int hv_m_append(struct mbuf *m0, int len, c_caddr_t cp) { struct mbuf *m, *n; int remainder, space; for (m = m0; m->m_next != NULL; m = m->m_next) ; remainder = len; space = M_TRAILINGSPACE(m); if (space > 0) { /* * Copy into available space. */ if (space > remainder) space = remainder; bcopy(cp, mtod(m, caddr_t) + m->m_len, space); m->m_len += space; cp += space; remainder -= space; } while (remainder > 0) { /* * Allocate a new mbuf; could check space * and allocate a cluster instead. 
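 */

/*
 * Usage sketch for hv_m_append(), assuming only its contract as described
 * above: the chain is grown with MJUMPAGESIZE clusters on demand, and a
 * zero return means an allocation failed part-way, leaving only a prefix
 * of the data copied. The wrapper name is hypothetical.
 */
static int
hv_m_append_sketch(struct mbuf *m0, const char *data, int len)
{
	if (!hv_m_append(m0, len, data))
		return (ENOBUFS);	/* partial copy; caller should drop m0 */
	return (0);
}

/*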
*/ n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE); if (n == NULL) break; n->m_len = min(MJUMPAGESIZE, remainder); bcopy(cp, mtod(n, caddr_t), n->m_len); cp += n->m_len; remainder -= n->m_len; m->m_next = n; m = n; } if (m0->m_flags & M_PKTHDR) m0->m_pkthdr.len += len - remainder; return (remainder == 0); } /* * Called when we receive a data packet from the "wire" on the * specified device * * Note: This is no longer used as a callback */ int netvsc_recv(struct hv_vmbus_channel *chan, netvsc_packet *packet, rndis_tcp_ip_csum_info *csum_info) { struct hn_rx_ring *rxr = chan->hv_chan_rxr; struct ifnet *ifp = rxr->hn_ifp; struct mbuf *m_new; int size, do_lro = 0, do_csum = 1; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return (0); /* * Bail out if packet contains more data than configured MTU. */ if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) { return (0); } else if (packet->tot_data_buf_len <= MHLEN) { m_new = m_gethdr(M_NOWAIT, MT_DATA); if (m_new == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (0); } memcpy(mtod(m_new, void *), packet->data, packet->tot_data_buf_len); m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len; rxr->hn_small_pkts++; } else { /* * Get an mbuf with a cluster. For packets 2K or less, * get a standard 2K cluster. For anything larger, get a * 4K cluster. Any buffers larger than 4K can cause problems * if looped around to the Hyper-V TX channel, so avoid them. */ size = MCLBYTES; if (packet->tot_data_buf_len > MCLBYTES) { /* 4096 */ size = MJUMPAGESIZE; } m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); if (m_new == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (0); } hv_m_append(m_new, packet->tot_data_buf_len, packet->data); } m_new->m_pkthdr.rcvif = ifp; if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0)) do_csum = 0; /* receive side checksum offload */ if (csum_info != NULL) { /* IP csum offload */ if (csum_info->receive.ip_csum_succeeded && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); rxr->hn_csum_ip++; } /* TCP/UDP csum offload */ if ((csum_info->receive.tcp_csum_succeeded || csum_info->receive.udp_csum_succeeded) && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; if (csum_info->receive.tcp_csum_succeeded) rxr->hn_csum_tcp++; else rxr->hn_csum_udp++; } if (csum_info->receive.ip_csum_succeeded && csum_info->receive.tcp_csum_succeeded) do_lro = 1; } else { const struct ether_header *eh; uint16_t etype; int hoff; hoff = sizeof(*eh); if (m_new->m_len < hoff) goto skip; eh = mtod(m_new, struct ether_header *); etype = ntohs(eh->ether_type); if (etype == ETHERTYPE_VLAN) { const struct ether_vlan_header *evl; hoff = sizeof(*evl); if (m_new->m_len < hoff) goto skip; evl = mtod(m_new, struct ether_vlan_header *); etype = ntohs(evl->evl_proto); } if (etype == ETHERTYPE_IP) { int pr; pr = hn_check_iplen(m_new, hoff); if (pr == IPPROTO_TCP) { if (do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_TCP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } /* Rely on SW csum verification though... 
*/ do_lro = 1; } else if (pr == IPPROTO_UDP) { if (do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_UDP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } } else if (pr != IPPROTO_DONE && do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); } } } skip: if ((packet->vlan_tci != 0) && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m_new->m_pkthdr.ether_vtag = packet->vlan_tci; m_new->m_flags |= M_VLANTAG; } m_new->m_pkthdr.flowid = rxr->hn_rx_idx; M_HASHTYPE_SET(m_new, M_HASHTYPE_OPAQUE); /* * Note: Moved RX completion back to hv_nv_on_receive() so all * messages (not just data messages) will trigger a response. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rxr->hn_pkts++; if ((ifp->if_capenable & IFCAP_LRO) && do_lro) { #if defined(INET) || defined(INET6) struct lro_ctrl *lro = &rxr->hn_lro; if (lro->lro_cnt) { rxr->hn_lro_tried++; if (tcp_lro_rx(lro, m_new, 0) == 0) { /* DONE! */ return 0; } } #endif } /* We're not holding the lock here, so don't release it */ (*ifp->if_input)(ifp, m_new); return (0); } /* * Rules for using sc->temp_unusable: * 1. sc->temp_unusable can only be read or written while holding NV_LOCK() * 2. code reading sc->temp_unusable under NV_LOCK(), and finding * sc->temp_unusable set, must release NV_LOCK() and exit * 3. to retain exclusive control of the interface, * sc->temp_unusable must be set by code before releasing NV_LOCK() * 4. only code setting sc->temp_unusable can clear sc->temp_unusable * 5. code setting sc->temp_unusable must eventually clear sc->temp_unusable */ /* * Standard ioctl entry point. Called when the user wants to configure * the interface. */ static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { hn_softc_t *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif netvsc_device_info device_info; struct hv_device *hn_dev; int mask, error = 0; int retry_cnt = 500; switch(cmd) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) hn_ifinit(sc); arp_ifinit(ifp, ifa); } else #endif error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: hn_dev = vmbus_get_devctx(sc->hn_dev); /* Check MTU value change */ if (ifp->if_mtu == ifr->ifr_mtu) break; if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) { error = EINVAL; break; } /* Obtain and record requested MTU */ ifp->if_mtu = ifr->ifr_mtu; #if __FreeBSD_version >= 1100099 /* * Make sure that LRO aggregation length limit is still * valid, after the MTU change. */ NV_LOCK(sc); if (sc->hn_rx_ring[0].hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp)) hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp)); NV_UNLOCK(sc); #endif do { NV_LOCK(sc); if (!sc->temp_unusable) { sc->temp_unusable = TRUE; retry_cnt = -1; } NV_UNLOCK(sc); if (retry_cnt > 0) { retry_cnt--; DELAY(5 * 1000); } } while (retry_cnt > 0); if (retry_cnt == 0) { error = EINVAL; break; } /* We must remove and add back the device to cause the new * MTU to take effect. This includes tearing down, but not * deleting the channel, then bringing it back up. 
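 */

/*
 * A compact restatement of the temp_unusable handshake the ioctl paths
 * below open-code (see the "Rules for using sc->temp_unusable" comment
 * above); hn_claim_exclusive() is a hypothetical name, and the driver
 * itself uses a 500-iteration retry budget and reports EINVAL on timeout.
 */
static int
hn_claim_exclusive(struct hn_softc *sc, int retries)
{
	while (retries-- > 0) {
		NV_LOCK(sc);
		if (!sc->temp_unusable) {
			sc->temp_unusable = TRUE;	/* rule 3: claim */
			NV_UNLOCK(sc);
			return (0);
		}
		NV_UNLOCK(sc);				/* rule 2: back off */
		DELAY(5 * 1000);
	}
	return (EBUSY);
}

/*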
*/ error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL); if (error) { NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); break; } error = hv_rf_on_device_add(hn_dev, &device_info, sc->hn_rx_ring_inuse); if (error) { NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); break; } sc->hn_tx_chimney_max = sc->net_dev->send_section_size; if (sc->hn_tx_ring[0].hn_tx_chimney_size > sc->hn_tx_chimney_max) hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max); hn_ifinit_locked(sc); NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); break; case SIOCSIFFLAGS: do { NV_LOCK(sc); if (!sc->temp_unusable) { sc->temp_unusable = TRUE; retry_cnt = -1; } NV_UNLOCK(sc); if (retry_cnt > 0) { retry_cnt--; DELAY(5 * 1000); } } while (retry_cnt > 0); if (retry_cnt == 0) { error = EINVAL; break; } if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. */ #ifdef notyet /* Fixme: Promiscuous mode? */ if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->hn_if_flags & IFF_PROMISC)) { /* do something here for Hyper-V */ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->hn_if_flags & IFF_PROMISC) { /* do something here for Hyper-V */ } else #endif hn_ifinit_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { hn_stop(sc); } } NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); sc->hn_if_flags = ifp->if_flags; error = 0; break; case SIOCSIFCAP: NV_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; if (ifp->if_capenable & IFCAP_TXCSUM) { ifp->if_hwassist |= sc->hn_tx_ring[0].hn_csum_assist; } else { ifp->if_hwassist &= ~sc->hn_tx_ring[0].hn_csum_assist; } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; else ifp->if_hwassist &= ~CSUM_IP_TSO; } if (mask & IFCAP_TSO6) { ifp->if_capenable ^= IFCAP_TSO6; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; else ifp->if_hwassist &= ~CSUM_IP6_TSO; } NV_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: #ifdef notyet /* Fixme: Multicast mode? 
*/ if (ifp->if_drv_flags & IFF_DRV_RUNNING) { NV_LOCK(sc); netvsc_setmulti(sc); NV_UNLOCK(sc); error = 0; } #endif error = EINVAL; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * */ static void hn_stop(hn_softc_t *sc) { struct ifnet *ifp; int ret, i; struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev); ifp = sc->hn_ifp; if (bootverbose) printf(" Closing Device ...\n"); atomic_clear_int(&ifp->if_drv_flags, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); for (i = 0; i < sc->hn_tx_ring_inuse; ++i) sc->hn_tx_ring[i].hn_oactive = 0; if_link_state_change(ifp, LINK_STATE_DOWN); sc->hn_initdone = 0; ret = hv_rf_on_close(device_ctx); } /* * FreeBSD transmit entry point */ static void hn_start(struct ifnet *ifp) { struct hn_softc *sc = ifp->if_softc; struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; sched = hn_start_locked(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (!sched) return; } do_sched: taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } static void hn_start_txeof(struct hn_tx_ring *txr) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); sched = hn_start_locked(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (sched) { taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } } else { do_sched: /* * Release the OACTIVE earlier, with the hope, that * others could catch up. The task will clear the * flag again with the hn_tx_lock to avoid possible * races. 
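 */

/*
 * The trylock-or-defer pattern used by hn_start() and hn_start_txeof()
 * above, distilled into one hypothetical helper: transmit inline while
 * the ring lock is uncontended, and otherwise (or when hn_start_locked()
 * asks for it) punt the remaining work to the per-ring taskqueue.
 */
static void
hn_tx_inline_or_defer_sketch(struct hn_tx_ring *txr)
{
	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return;		/* everything was sent inline */
	}
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}

/*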
*/ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } /* * */ static void hn_ifinit_locked(hn_softc_t *sc) { struct ifnet *ifp; struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev); int ret, i; ifp = sc->hn_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return; } hv_promisc_mode = 1; ret = hv_rf_on_open(device_ctx); if (ret != 0) { return; } else { sc->hn_initdone = 1; } atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); for (i = 0; i < sc->hn_tx_ring_inuse; ++i) sc->hn_tx_ring[i].hn_oactive = 0; atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING); if_link_state_change(ifp, LINK_STATE_UP); } /* * */ static void hn_ifinit(void *xsc) { hn_softc_t *sc = xsc; NV_LOCK(sc); if (sc->temp_unusable) { NV_UNLOCK(sc); return; } sc->temp_unusable = TRUE; NV_UNLOCK(sc); hn_ifinit_locked(sc); NV_LOCK(sc); sc->temp_unusable = FALSE; NV_UNLOCK(sc); } #ifdef LATER /* * */ static void hn_watchdog(struct ifnet *ifp) { hn_softc_t *sc; sc = ifp->if_softc; printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit); hn_ifinit(sc); /*???*/ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } #endif #if __FreeBSD_version >= 1100099 static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; unsigned int lenlim; int error; lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim; error = sysctl_handle_int(oidp, &lenlim, 0, req); if (error || req->newptr == NULL) return error; if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) || lenlim > TCP_LRO_LENGTH_MAX) return EINVAL; NV_LOCK(sc); hn_set_lro_lenlim(sc, lenlim); NV_UNLOCK(sc); return 0; } static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ackcnt, error, i; /* * lro_ackcnt_lim is append count limit, * +1 to turn it into aggregation limit. */ ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1; error = sysctl_handle_int(oidp, &ackcnt, 0, req); if (error || req->newptr == NULL) return error; if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1)) return EINVAL; /* * Convert aggregation limit back to append * count limit. */ --ackcnt; NV_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_inuse; ++i) sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt; NV_UNLOCK(sc); return 0; } #endif static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int hcsum = arg2; int on, error, i; on = 0; if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum) on = 1; error = sysctl_handle_int(oidp, &on, 0, req); if (error || req->newptr == NULL) return error; NV_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_inuse; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (on) rxr->hn_trust_hcsum |= hcsum; else rxr->hn_trust_hcsum &= ~hcsum; } NV_UNLOCK(sc); return 0; } static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int chimney_size, error; chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size; error = sysctl_handle_int(oidp, &chimney_size, 0, req); if (error || req->newptr == NULL) return error; if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0) return EINVAL; hn_set_tx_chimney_size(sc, chimney_size); return 0; } static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; u_long stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_inuse; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((u_long *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_long(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. 
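 */

/*
 * Sketch of the offset-based aggregation these sysctl handlers share:
 * each handler is parameterized with a byte offset into the per-ring
 * structure (via __offsetof()), sums that field across the rings on a
 * read, and zeroes it everywhere on a write. Names here are hypothetical.
 */
static u_long
hn_sum_ring_stat_sketch(const struct hn_rx_ring *rings, int nrings, int ofs)
{
	u_long sum = 0;
	int i;

	for (i = 0; i < nrings; ++i)
		sum += *(const u_long *)((const uint8_t *)&rings[i] + ofs);
	return (sum);
}

/*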
*/ for (i = 0; i < sc->hn_rx_ring_inuse; ++i) { rxr = &sc->hn_rx_ring[i]; *((u_long *)((uint8_t *)rxr + ofs)) = 0; } return 0; } static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; uint64_t stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_inuse; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((uint64_t *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_64(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_rx_ring_inuse; ++i) { rxr = &sc->hn_rx_ring[i]; *((uint64_t *)((uint8_t *)rxr + ofs)) = 0; } return 0; } static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_tx_ring *txr; u_long stat; stat = 0; for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { txr = &sc->hn_tx_ring[i]; stat += *((u_long *)((uint8_t *)txr + ofs)); } error = sysctl_handle_long(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { txr = &sc->hn_tx_ring[i]; *((u_long *)((uint8_t *)txr + ofs)) = 0; } return 0; } static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error, conf; struct hn_tx_ring *txr; txr = &sc->hn_tx_ring[0]; conf = *((int *)((uint8_t *)txr + ofs)); error = sysctl_handle_int(oidp, &conf, 0, req); if (error || req->newptr == NULL) return error; NV_LOCK(sc); for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { txr = &sc->hn_tx_ring[i]; *((int *)((uint8_t *)txr + ofs)) = conf; } NV_UNLOCK(sc); return 0; } static int hn_check_iplen(const struct mbuf *m, int hoff) { const struct ip *ip; int len, iphlen, iplen; const struct tcphdr *th; int thoff; /* TCP data offset */ len = hoff + sizeof(struct ip); /* The packet must be at least the size of an IP header. */ if (m->m_pkthdr.len < len) return IPPROTO_DONE; /* The fixed IP header must reside completely in the first mbuf. */ if (m->m_len < len) return IPPROTO_DONE; ip = mtodo(m, hoff); /* Bound check the packet's stated IP header length. */ iphlen = ip->ip_hl << 2; if (iphlen < sizeof(struct ip)) /* minimum header length */ return IPPROTO_DONE; /* The full IP header must reside completely in the one mbuf. */ if (m->m_len < hoff + iphlen) return IPPROTO_DONE; iplen = ntohs(ip->ip_len); /* * Check that the amount of data in the buffers is at least * as much as the IP header would have us expect. */ if (m->m_pkthdr.len < hoff + iplen) return IPPROTO_DONE; /* * Ignore IP fragments. */ if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF)) return IPPROTO_DONE; /* * The TCP/IP or UDP/IP header must be entirely contained within * the first fragment of a packet.
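 */

/*
 * Usage sketch for hn_check_iplen(), whose per-protocol checks follow
 * below: it returns the IP protocol number only when every length check
 * passes and IPPROTO_DONE otherwise, so callers can branch on the result
 * before trusting the host's checksum claims. The wrapper is hypothetical.
 */
static int
hn_rx_classify_sketch(const struct mbuf *m, int hoff)
{
	switch (hn_check_iplen(m, hoff)) {
	case IPPROTO_TCP:
		return (1);	/* sane headers; TCP, an LRO candidate */
	case IPPROTO_UDP:
		return (1);	/* sane headers; UDP */
	default:
		return (0);	/* fragment, truncated, or other protocol */
	}
}

/*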
*/ switch (ip->ip_p) { case IPPROTO_TCP: if (iplen < iphlen + sizeof(struct tcphdr)) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + sizeof(struct tcphdr)) return IPPROTO_DONE; th = (const struct tcphdr *)((const uint8_t *)ip + iphlen); thoff = th->th_off << 2; if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + thoff) return IPPROTO_DONE; break; case IPPROTO_UDP: if (iplen < iphlen + sizeof(struct udphdr)) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + sizeof(struct udphdr)) return IPPROTO_DONE; break; default: if (iplen < iphlen) return IPPROTO_DONE; break; } return ip->ip_p; } static void hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = arg; if (error) return; KASSERT(nseg == 1, ("too many segments %d!", nseg)); *paddr = segs->ds_addr; } static void hn_create_rx_data(struct hn_softc *sc, int ring_cnt) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; device_t dev = sc->hn_dev; #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 int lroent_cnt; #endif #endif int i; sc->hn_rx_ring_cnt = ring_cnt; sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt; sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt, M_NETVSC, M_WAITOK | M_ZERO); #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 lroent_cnt = hn_lro_entry_count; if (lroent_cnt < TCP_LRO_ENTRIES) lroent_cnt = TCP_LRO_ENTRIES; device_printf(dev, "LRO: entry count %d\n", lroent_cnt); #endif #endif /* INET || INET6 */ ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); /* Create dev.hn.UNIT.rx sysctl tree */ sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx", CTLFLAG_RD, 0, ""); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (hn_trust_hosttcp) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; if (hn_trust_hostudp) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP; if (hn_trust_hostip) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; rxr->hn_ifp = sc->hn_ifp; rxr->hn_rx_idx = i; /* * Initialize LRO. 
*/ #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0); #else tcp_lro_init(&rxr->hn_lro); rxr->hn_lro.ifp = sc->hn_ifp; #endif #if __FreeBSD_version >= 1100099 rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; #endif #endif /* INET || INET6 */ if (sc->hn_rx_sysctl_tree != NULL) { char name[16]; /* * Create per RX ring sysctl tree: * dev.hn.UNIT.rx.RINGID */ snprintf(name, sizeof(name), "%d", i); rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree), OID_AUTO, name, CTLFLAG_RD, 0, ""); if (rxr->hn_rx_sysctl_tree != NULL) { SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree), OID_AUTO, "packets", CTLFLAG_RW, &rxr->hn_pkts, "# of packets received"); } } } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued", CTLTYPE_U64 | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_lro.lro_queued), hn_rx_stat_u64_sysctl, "LU", "LRO queued"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed", CTLTYPE_U64 | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_lro.lro_flushed), hn_rx_stat_u64_sysctl, "LU", "LRO flushed"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_lro_tried), hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries"); #if __FreeBSD_version >= 1100099 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU", "Max # of data bytes to be aggregated by LRO"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I", "Max # of ACKs to be aggregated by LRO"); #endif SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP, hn_trust_hcsum_sysctl, "I", "Trust tcp segment verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP, hn_trust_hcsum_sysctl, "I", "Trust udp datagram verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP, hn_trust_hcsum_sysctl, "I", "Trust ip packet verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_ip), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_tcp), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_udp), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_csum_trusted), hn_rx_stat_ulong_sysctl, "LU", "# of packets that we trust host's csum verification"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_rx_ring, hn_small_pkts), hn_rx_stat_ulong_sysctl, "LU", "# of small packets received"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# created RX rings"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse", CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# used RX rings"); } static void hn_destroy_rx_data(struct hn_softc
*sc) { #if defined(INET) || defined(INET6) int i; #endif if (sc->hn_rx_ring_cnt == 0) return; #if defined(INET) || defined(INET6) for (i = 0; i < sc->hn_rx_ring_cnt; ++i) tcp_lro_free(&sc->hn_rx_ring[i].hn_lro); #endif free(sc->hn_rx_ring, M_NETVSC); sc->hn_rx_ring = NULL; sc->hn_rx_ring_cnt = 0; sc->hn_rx_ring_inuse = 0; } static int hn_create_tx_ring(struct hn_softc *sc, int id) { struct hn_tx_ring *txr = &sc->hn_tx_ring[id]; bus_dma_tag_t parent_dtag; int error, i; txr->hn_sc = sc; txr->hn_tx_idx = id; #ifndef HN_USE_TXDESC_BUFRING mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); #endif mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF); txr->hn_txdesc_cnt = HN_TX_DESC_CNT; txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK | M_ZERO); #ifndef HN_USE_TXDESC_BUFRING SLIST_INIT(&txr->hn_txlist); #else txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK, &txr->hn_tx_lock); #endif txr->hn_tx_taskq = sc->hn_tx_taskq; if (hn_use_if_start) { txr->hn_txeof = hn_start_txeof; TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr); TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr); } else { txr->hn_txeof = hn_xmit_txeof; TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr); TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr); txr->hn_mbuf_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK, &txr->hn_tx_lock); } txr->hn_direct_tx_size = hn_direct_tx_size; if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1) txr->hn_csum_assist = HN_CSUM_ASSIST; else txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8; /* * Always schedule transmission instead of trying to do direct * transmission. This one gives the best performance so far. */ txr->hn_sched_tx = 1; parent_dtag = bus_get_dma_tag(sc->hn_dev); /* DMA tag for RNDIS messages. */ error = bus_dma_tag_create(parent_dtag, /* parent */ HN_RNDIS_MSG_ALIGN, /* alignment */ HN_RNDIS_MSG_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HN_RNDIS_MSG_LEN, /* maxsize */ 1, /* nsegments */ HN_RNDIS_MSG_LEN, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->hn_tx_rndis_dtag); if (error) { device_printf(sc->hn_dev, "failed to create rndis dmatag\n"); return error; } /* DMA tag for data. */ error = bus_dma_tag_create(parent_dtag, /* parent */ 1, /* alignment */ HN_TX_DATA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HN_TX_DATA_MAXSIZE, /* maxsize */ HN_TX_DATA_SEGCNT_MAX, /* nsegments */ HN_TX_DATA_SEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->hn_tx_data_dtag); if (error) { device_printf(sc->hn_dev, "failed to create data dmatag\n"); return error; } for (i = 0; i < txr->hn_txdesc_cnt; ++i) { struct hn_txdesc *txd = &txr->hn_txdesc[i]; txd->txr = txr; /* * Allocate and load RNDIS messages. 
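 */

/*
 * The bus_dma(9) sequence used for each descriptor's RNDIS message, in
 * outline: allocate DMA-able memory against the tag, then load it so the
 * callback (hn_dma_map_paddr() above) records the single segment's bus
 * address. Error unwinding is elided here; the real code below frees the
 * allocation when the load fails. The helper name is hypothetical.
 */
static int
hn_dma_alloc_sketch(bus_dma_tag_t tag, void **vaddr, bus_dmamap_t *map,
    bus_size_t size, bus_addr_t *paddr)
{
	int error;

	error = bus_dmamem_alloc(tag, vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, map);
	if (error)
		return (error);
	return (bus_dmamap_load(tag, *map, *vaddr, size,
	    hn_dma_map_paddr, paddr, BUS_DMA_NOWAIT));
}

/*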
*/ error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, (void **)&txd->rndis_msg, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &txd->rndis_msg_dmap); if (error) { device_printf(sc->hn_dev, "failed to allocate rndis_msg, %d\n", i); return error; } error = bus_dmamap_load(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap, txd->rndis_msg, HN_RNDIS_MSG_LEN, hn_dma_map_paddr, &txd->rndis_msg_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->hn_dev, "failed to load rndis_msg, %d\n", i); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); return error; } /* DMA map for TX data. */ error = bus_dmamap_create(txr->hn_tx_data_dtag, 0, &txd->data_dmap); if (error) { device_printf(sc->hn_dev, "failed to allocate tx data dmamap\n"); bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); return error; } /* All set, put it to list */ txd->flags |= HN_TXD_FLAG_ONLIST; #ifndef HN_USE_TXDESC_BUFRING SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); #else buf_ring_enqueue(txr->hn_txdesc_br, txd); #endif } txr->hn_txdesc_avail = txr->hn_txdesc_cnt; if (sc->hn_tx_sysctl_tree != NULL) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; char name[16]; /* * Create per TX ring sysctl tree: * dev.hn.UNIT.tx.RINGID */ ctx = device_get_sysctl_ctx(sc->hn_dev); child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree); snprintf(name, sizeof(name), "%d", id); txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, name, CTLFLAG_RD, 0, ""); if (txr->hn_tx_sysctl_tree != NULL) { child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail", CTLFLAG_RD, &txr->hn_txdesc_avail, 0, "# of available TX descs"); if (!hn_use_if_start) { SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive", CTLFLAG_RD, &txr->hn_oactive, 0, "over active"); } SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets", CTLFLAG_RW, &txr->hn_pkts, "# of packets transmitted"); } } return 0; } static void hn_txdesc_dmamap_destroy(struct hn_txdesc *txd) { struct hn_tx_ring *txr = txd->txr; KASSERT(txd->m == NULL, ("still has mbuf installed")); KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped")); bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); } static void hn_destroy_tx_ring(struct hn_tx_ring *txr) { struct hn_txdesc *txd; if (txr->hn_txdesc == NULL) return; #ifndef HN_USE_TXDESC_BUFRING while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) { SLIST_REMOVE_HEAD(&txr->hn_txlist, link); hn_txdesc_dmamap_destroy(txd); } #else mtx_lock(&txr->hn_tx_lock); while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL) hn_txdesc_dmamap_destroy(txd); mtx_unlock(&txr->hn_tx_lock); #endif if (txr->hn_tx_data_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_data_dtag); if (txr->hn_tx_rndis_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_rndis_dtag); #ifdef HN_USE_TXDESC_BUFRING buf_ring_free(txr->hn_txdesc_br, M_NETVSC); #endif free(txr->hn_txdesc, M_NETVSC); txr->hn_txdesc = NULL; if (txr->hn_mbuf_br != NULL) buf_ring_free(txr->hn_mbuf_br, M_NETVSC); #ifndef HN_USE_TXDESC_BUFRING mtx_destroy(&txr->hn_txlist_spin); #endif mtx_destroy(&txr->hn_tx_lock); } static int hn_create_tx_data(struct hn_softc *sc, int ring_cnt) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; int i; sc->hn_tx_ring_cnt = ring_cnt; sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt; sc->hn_tx_ring = malloc(sizeof(struct 
hn_tx_ring) * sc->hn_tx_ring_cnt, M_NETVSC, M_WAITOK | M_ZERO); ctx = device_get_sysctl_ctx(sc->hn_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev)); /* Create dev.hn.UNIT.tx sysctl tree */ sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx", CTLFLAG_RD, 0, ""); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { int error; error = hn_create_tx_ring(sc, i); if (error) return error; } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_no_txdescs), hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_send_failed), hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_txdma_failed), hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failure"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_tx_collapsed), hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney", CTLTYPE_ULONG | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_tx_chimney), hn_tx_stat_ulong_sysctl, "LU", "# of chimney send"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt", CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0, "# of total TX descs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max", CTLFLAG_RD, &sc->hn_tx_chimney_max, 0, "Chimney send packet size upper boundary"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size", CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl, "I", "Chimney send packet size limit"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size", CTLTYPE_INT | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_direct_tx_size), hn_tx_conf_int_sysctl, "I", "Size of the packet for direct transmission"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx", CTLTYPE_INT | CTLFLAG_RW, sc, __offsetof(struct hn_tx_ring, hn_sched_tx), hn_tx_conf_int_sysctl, "I", "Always schedule transmission " "instead of doing direct transmission"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings"); return 0; } static void hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size) { int i; NV_LOCK(sc); for (i = 0; i < sc->hn_tx_ring_inuse; ++i) sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size; NV_UNLOCK(sc); } static void hn_destroy_tx_data(struct hn_softc *sc) { int i; if (sc->hn_tx_ring_cnt == 0) return; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) hn_destroy_tx_ring(&sc->hn_tx_ring[i]); free(sc->hn_tx_ring, M_NETVSC); sc->hn_tx_ring = NULL; sc->hn_tx_ring_cnt = 0; sc->hn_tx_ring_inuse = 0; } static void hn_start_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); hn_start_locked(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_start_txeof_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE); hn_start_locked(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_stop_tx_tasks(struct hn_softc *sc) { int i; for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; 
taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task); taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task); } } static int hn_xmit(struct hn_tx_ring *txr, int len) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; struct mbuf *m_head; mtx_assert(&txr->hn_tx_lock, MA_OWNED); KASSERT(hn_use_if_start == 0, ("hn_xmit is called, when if_start is enabled")); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive) return 0; while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) { struct hn_txdesc *txd; int error; if (len > 0 && m_head->m_pkthdr.len > len) { /* * This sending could be time consuming; let callers * dispatch this packet sending (and sending of any * following up packets) to tx taskqueue. */ drbr_putback(ifp, txr->hn_mbuf_br, m_head); return 1; } txd = hn_txdesc_get(txr); if (txd == NULL) { txr->hn_no_txdescs++; drbr_putback(ifp, txr->hn_mbuf_br, m_head); txr->hn_oactive = 1; break; } error = hn_encap(txr, txd, &m_head); if (error) { /* Both txd and m_head are freed; discard */ drbr_advance(ifp, txr->hn_mbuf_br); continue; } error = hn_send_pkt(ifp, txr, txd); if (__predict_false(error)) { /* txd is freed, but m_head is not */ drbr_putback(ifp, txr->hn_mbuf_br, m_head); txr->hn_oactive = 1; break; } /* Sent */ drbr_advance(ifp, txr->hn_mbuf_br); } return 0; } static int hn_transmit(struct ifnet *ifp, struct mbuf *m) { struct hn_softc *sc = ifp->if_softc; struct hn_tx_ring *txr; int error, idx = 0; /* * Select the TX ring based on flowid */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse; txr = &sc->hn_tx_ring[idx]; error = drbr_enqueue(ifp, txr->hn_mbuf_br, m); if (error) return error; if (txr->hn_oactive) return 0; if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; sched = hn_xmit(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (!sched) return 0; } do_sched: taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); return 0; } static void hn_xmit_qflush(struct ifnet *ifp) { struct hn_softc *sc = ifp->if_softc; int i; for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; struct mbuf *m; mtx_lock(&txr->hn_tx_lock); while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL) m_freem(m); mtx_unlock(&txr->hn_tx_lock); } if_qflush(ifp); } static void hn_xmit_txeof(struct hn_tx_ring *txr) { if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; txr->hn_oactive = 0; sched = hn_xmit(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (sched) { taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } } else { do_sched: /* * Release the oactive earlier, with the hope, that * others could catch up. The task will clear the * oactive again with the hn_tx_lock to avoid possible * races. 
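 */

/*
 * The drbr(9) protocol hn_xmit() above relies on, distilled: drbr_peek()
 * leaves the mbuf on the ring, so a failed send can drbr_putback() it for
 * a later retry, while a sent (or discarded) mbuf is consumed with
 * drbr_advance(). The send callback here is a stand-in.
 */
static void
hn_drbr_sketch(struct ifnet *ifp, struct buf_ring *br,
    int (*send)(struct mbuf *))
{
	struct mbuf *m;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (send(m) != 0) {
			drbr_putback(ifp, br, m);	/* keep for retry */
			break;
		}
		drbr_advance(ifp, br);			/* consumed */
	}
}

/*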
*/ txr->hn_oactive = 0; taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } static void hn_xmit_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); hn_xmit(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); txr->hn_oactive = 0; hn_xmit(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_channel_attach(struct hn_softc *sc, struct hv_vmbus_channel *chan) { struct hn_rx_ring *rxr; int idx; idx = chan->offer_msg.offer.sub_channel_index; KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse, ("invalid channel index %d, should > 0 && < %d", idx, sc->hn_rx_ring_inuse)); rxr = &sc->hn_rx_ring[idx]; KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0, ("RX ring %d already attached", idx)); rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED; chan->hv_chan_rxr = rxr; if_printf(sc->hn_ifp, "link RX ring %d to channel%u\n", idx, chan->offer_msg.child_rel_id); if (idx < sc->hn_tx_ring_inuse) { struct hn_tx_ring *txr = &sc->hn_tx_ring[idx]; KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0, ("TX ring %d already attached", idx)); txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED; chan->hv_chan_txr = txr; txr->hn_chan = chan; if_printf(sc->hn_ifp, "link TX ring %d to channel%u\n", idx, chan->offer_msg.child_rel_id); } /* Bind channel to a proper CPU */ vmbus_channel_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus); } void netvsc_subchan_callback(struct hn_softc *sc, struct hv_vmbus_channel *chan) { KASSERT(!HV_VMBUS_CHAN_ISPRIMARY(chan), ("subchannel callback on primary channel")); KASSERT(chan->offer_msg.offer.sub_channel_index > 0, ("invalid channel subidx %u", chan->offer_msg.offer.sub_channel_index)); hn_channel_attach(sc, chan); } static void hn_tx_taskq_create(void *arg __unused) { if (!hn_share_tx_taskq) return; hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &hn_tx_taskq); if (hn_bind_tx_taskq >= 0) { int cpu = hn_bind_tx_taskq; cpuset_t cpu_set; if (cpu > mp_ncpus - 1) cpu = mp_ncpus - 1; CPU_SETOF(cpu, &cpu_set); taskqueue_start_threads_cpuset(&hn_tx_taskq, 1, PI_NET, &cpu_set, "hn tx"); } else { taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx"); } } SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST, hn_tx_taskq_create, NULL); static void hn_tx_taskq_destroy(void *arg __unused) { if (hn_tx_taskq != NULL) taskqueue_free(hn_tx_taskq); } SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST, hn_tx_taskq_destroy, NULL); static device_method_t netvsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netvsc_probe), DEVMETHOD(device_attach, netvsc_attach), DEVMETHOD(device_detach, netvsc_detach), DEVMETHOD(device_shutdown, netvsc_shutdown), { 0, 0 } }; static driver_t netvsc_driver = { NETVSC_DEVNAME, netvsc_methods, sizeof(hn_softc_t) }; static devclass_t netvsc_devclass; DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0); MODULE_VERSION(hn, 1); MODULE_DEPEND(hn, vmbus, 1, 1, 1); Index: projects/release-pkg/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c =================================================================== --- projects/release-pkg/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (revision 297926) +++ projects/release-pkg/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c (revision 297927) @@ -1,2137 +1,2130 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface * to the Common Access Method (CAM) layer. CAM control blocks (CCBs) are * converted into VSCSI protocol messages which are delivered to the parent * partition StorVSP driver over the Hyper-V VMBUS. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_vstorage.h" #define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE) #define STORVSC_MAX_LUNS_PER_TARGET (64) #define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2) #define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1) #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS #define STORVSC_MAX_TARGETS (2) #define STORVSC_WIN7_MAJOR 4 #define STORVSC_WIN7_MINOR 2 #define STORVSC_WIN8_MAJOR 5 #define STORVSC_WIN8_MINOR 1 #define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta) #define HV_ALIGN(x, a) roundup2(x, a) struct storvsc_softc; struct hv_sgl_node { LIST_ENTRY(hv_sgl_node) link; struct sglist *sgl_data; }; struct hv_sgl_page_pool{ LIST_HEAD(, hv_sgl_node) in_use_sgl_list; LIST_HEAD(, hv_sgl_node) free_sgl_list; boolean_t is_init; } g_hv_sgl_page_pool; #define STORVSC_MAX_SG_PAGE_CNT STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT enum storvsc_request_type { WRITE_TYPE, READ_TYPE, UNKNOWN_TYPE }; struct hv_storvsc_request { LIST_ENTRY(hv_storvsc_request) link; struct vstor_packet vstor_packet; hv_vmbus_multipage_buffer data_buf; void *sense_data; uint8_t sense_info_len; uint8_t retries; union ccb *ccb; struct storvsc_softc *softc; struct callout callout; struct sema synch_sema; /* Synchronize the request/response if needed */ struct sglist *bounce_sgl; unsigned int bounce_sgl_count; uint64_t not_aligned_seg_bits; }; struct storvsc_softc { struct hv_device *hs_dev; LIST_HEAD(, hv_storvsc_request) hs_free_list; struct mtx hs_lock; struct storvsc_driver_props *hs_drv_props; int hs_unit; uint32_t hs_frozen; struct cam_sim *hs_sim; struct cam_path *hs_path; uint32_t hs_num_out_reqs; boolean_t hs_destroy; boolean_t hs_drain_notify;
boolean_t hs_open_multi_channel; struct sema hs_drain_sema; struct hv_storvsc_request hs_init_req; struct hv_storvsc_request hs_reset_req; }; /** * HyperV storvsc timeout testing cases: * a. IO returned after first timeout; * b. IO returned after second timeout and queue freeze; * c. IO returned while timer handler is running * The first can be tested by "sg_senddiag -vv /dev/daX", * and the second and third can be done by * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX". */ #define HVS_TIMEOUT_TEST 0 /* * Bus/adapter reset functionality on the Hyper-V host is * buggy and it will be disabled until * it can be further tested. */ #define HVS_HOST_RESET 0 struct storvsc_driver_props { char *drv_name; char *drv_desc; uint8_t drv_max_luns_per_target; uint8_t drv_max_ios_per_target; uint32_t drv_ringbuffer_size; }; enum hv_storage_type { DRIVER_BLKVSC, DRIVER_STORVSC, DRIVER_UNKNOWN }; #define HS_MAX_ADAPTERS 10 #define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ static const hv_guid gStorVscDeviceType={ .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f} }; /* {32412632-86cb-44a2-9b5c-50d1417354f5} */ static const hv_guid gBlkVscDeviceType={ .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5} }; static struct storvsc_driver_props g_drv_props_table[] = { {"blkvsc", "Hyper-V IDE Storage Interface", BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS, STORVSC_RINGBUFFER_SIZE}, {"storvsc", "Hyper-V SCSI Storage Interface", STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS, STORVSC_RINGBUFFER_SIZE} }; /* * Sense buffer size changed in win8; have a run-time * variable to track the size we should use. */ static int sense_buffer_size; /* * The size of the vmscsi_request has changed in win8. The * additional size is for the newly added elements in the * structure. These elements are valid only when we are talking * to a win8 host. * Track the correct size we need to apply. 
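* For illustration (this matches the probe logic further below): against a WS2008/Win7 host the win8-only extension is carved off of every packet, i.e. vmscsi_size_delta = sizeof(struct vmscsi_win8_extension) and VSTOR_PKT_SIZE = sizeof(struct vstor_packet) - vmscsi_size_delta, while against a Win8+ host vmscsi_size_delta is 0 and the full structure goes on the wire.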
*/ static int vmscsi_size_delta; static int storvsc_current_major; static int storvsc_current_minor; /* static functions */ static int storvsc_probe(device_t dev); static int storvsc_attach(device_t dev); static int storvsc_detach(device_t dev); static void storvsc_poll(struct cam_sim * sim); static void storvsc_action(struct cam_sim * sim, union ccb * ccb); static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp); static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp); static enum hv_storage_type storvsc_get_storage_type(device_t dev); static void hv_storvsc_rescan_target(struct storvsc_softc *sc); static void hv_storvsc_on_channel_callback(void *context); static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc, struct vstor_packet *vstor_packet, struct hv_storvsc_request *request); static int hv_storvsc_connect_vsp(struct hv_device *device); static void storvsc_io_done(struct hv_storvsc_request *reqp); static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, bus_dma_segment_t *orig_sgl, unsigned int orig_sgl_count, uint64_t seg_bits); void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, unsigned int dest_sgl_count, struct sglist* src_sgl, uint64_t seg_bits); static device_method_t storvsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, storvsc_probe), DEVMETHOD(device_attach, storvsc_attach), DEVMETHOD(device_detach, storvsc_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t storvsc_driver = { "storvsc", storvsc_methods, sizeof(struct storvsc_softc), }; static devclass_t storvsc_devclass; DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0); MODULE_VERSION(storvsc, 1); MODULE_DEPEND(storvsc, vmbus, 1, 1, 1); /** * The host is capable of sending messages to us that are * completely unsolicited. So, we need to address the race * condition where we may be in the process of unloading the * driver when the host may send us an unsolicited message. * We address this issue by implementing a sequentially * consistent protocol: * * 1. Channel callback is invoked while holding the the channel lock * and an unloading driver will reset the channel callback under * the protection of this channel lock. * * 2. To ensure bounded wait time for unloading a driver, we don't * permit outgoing traffic once the device is marked as being * destroyed. * * 3. Once the device is marked as being destroyed, we only * permit incoming traffic to properly account for * packets already sent out. */ static inline struct storvsc_softc * get_stor_device(struct hv_device *device, boolean_t outbound) { struct storvsc_softc *sc; sc = device_get_softc(device->device); - if (sc == NULL) { - return NULL; - } if (outbound) { /* * Here we permit outgoing I/O only * if the device is not being destroyed. */ if (sc->hs_destroy) { sc = NULL; } } else { /* * inbound case; if being destroyed * only permit to account for * messages already sent out. 
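* (That is, inbound delivery continues while hs_num_out_reqs is still non-zero and stops once the count drains to 0.)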
*/ if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) { sc = NULL; } } return sc; } /** * @brief Callback handler, invoked when a multi-channel offer is received * * @param context the new multi-channel */ static void storvsc_handle_sc_creation(void *context) { hv_vmbus_channel *new_channel; struct hv_device *device; struct storvsc_softc *sc; struct vmstor_chan_props props; int ret = 0; new_channel = (hv_vmbus_channel *)context; device = new_channel->device; sc = get_stor_device(device, TRUE); if (sc == NULL) return; if (FALSE == sc->hs_open_multi_channel) return; memset(&props, 0, sizeof(props)); ret = hv_vmbus_channel_open(new_channel, sc->hs_drv_props->drv_ringbuffer_size, sc->hs_drv_props->drv_ringbuffer_size, (void *)&props, sizeof(struct vmstor_chan_props), hv_storvsc_on_channel_callback, new_channel); return; } /** * @brief Send multi-channel creation request to host * * @param device a Hyper-V device pointer * @param max_chans the max channels supported by vmbus */ static void storvsc_send_multichannel_request(struct hv_device *dev, int max_chans) { struct storvsc_softc *sc; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; int request_channels_cnt = 0; int ret; /* get the number of multi-channels we need to create */ request_channels_cnt = MIN(max_chans, mp_ncpus); sc = get_stor_device(dev, TRUE); if (sc == NULL) { printf("Storvsc_error: get sc failed while sending multichannel " "request\n"); return; } request = &sc->hs_init_req; /* Establish a handler for multi-channel */ dev->channel->sc_creation_callback = storvsc_handle_sc_creation; /* request the host to create multi-channel */ memset(request, 0, sizeof(struct hv_storvsc_request)); sema_init(&request->synch_sema, 0, ("stor_synch_sema")); vstor_packet = &request->vstor_packet; vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->u.multi_channels_cnt = request_channels_cnt; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); /* wait for 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) { printf("Storvsc_error: create multi-channel timeout, %d\n", ret); return; } if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { printf("Storvsc_error: create multi-channel invalid operation " "(%d) or status (%u)\n", vstor_packet->operation, vstor_packet->status); return; } sc->hs_open_multi_channel = TRUE; if (bootverbose) printf("Storvsc create multi-channel success!\n"); } /** * @brief initialize channel connection to parent partition * * @param dev a Hyper-V device pointer * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_channel_init(struct hv_device *dev) { int ret = 0; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; struct storvsc_softc *sc; uint16_t max_chans = 0; boolean_t support_multichannel = FALSE; max_chans = 0; support_multichannel = FALSE; sc = get_stor_device(dev, TRUE); if (sc == NULL) return (ENODEV); request = &sc->hs_init_req; memset(request, 0, sizeof(struct hv_storvsc_request)); vstor_packet = &request->vstor_packet; request->softc = sc; /** * Initiate the vsc/vsp initialization protocol on the open channel */ sema_init(&request->synch_sema, 0, ("stor_synch_sema")); vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret =
hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { goto cleanup; } /* reuse the packet for the supported version range query */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->u.version.major_minor = VMSTOR_PROTOCOL_VERSION(storvsc_current_major, storvsc_current_minor); /* revision is only significant for Windows guests */ vstor_packet->u.version.revision = 0; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret) goto cleanup; /* TODO: Check returned version */ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) goto cleanup; /** * Query channel properties */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) goto cleanup; /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; /* TODO: Check returned version */ if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) { goto cleanup; } /* the multi-channel feature is supported by WIN8 and later versions */ max_chans = vstor_packet->u.chan_props.max_channel_cnt; if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) && (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) && (vstor_packet->u.chan_props.flags & HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) { support_multichannel = TRUE; } memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet( dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { goto cleanup; } /* wait 5 seconds */ ret = sema_timedwait(&request->synch_sema, 5 * hz); if (ret != 0) goto cleanup; if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO || vstor_packet->status != 0) goto cleanup; /* * If multi-channel is supported, send multichannel create * request to host. */ if (support_multichannel) storvsc_send_multichannel_request(dev, max_chans); cleanup: sema_destroy(&request->synch_sema); return (ret); } /** * @brief Open channel connection to parent partition StorVSP driver * * Open and initialize channel connection to parent partition StorVSP driver.
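* The handshake performed via hv_storvsc_channel_init() above is a fixed four-step exchange, each step a packet send followed by a 5 second sema_timedwait(): BEGIN_INITIALIZATION -> QUERY_PROTOCOL_VERSION -> QUERY_PROPERTIES -> END_INITIALIZATION.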
* * @param pointer to a Hyper-V device * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_connect_vsp(struct hv_device *dev) { int ret = 0; struct vmstor_chan_props props; struct storvsc_softc *sc; sc = device_get_softc(dev->device); memset(&props, 0, sizeof(struct vmstor_chan_props)); /* * Open the channel */ ret = hv_vmbus_channel_open( dev->channel, sc->hs_drv_props->drv_ringbuffer_size, sc->hs_drv_props->drv_ringbuffer_size, (void *)&props, sizeof(struct vmstor_chan_props), hv_storvsc_on_channel_callback, dev->channel); if (ret != 0) { return ret; } ret = hv_storvsc_channel_init(dev); return (ret); } #if HVS_HOST_RESET static int hv_storvsc_host_reset(struct hv_device *dev) { int ret = 0; struct storvsc_softc *sc; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; sc = get_stor_device(dev, TRUE); if (sc == NULL) { return ENODEV; } request = &sc->hs_reset_req; request->softc = sc; vstor_packet = &request->vstor_packet; sema_init(&request->synch_sema, 0, "stor synch sema"); vstor_packet->operation = VSTOR_OPERATION_RESETBUS; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = hv_vmbus_channel_send_packet(dev->channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)&sc->hs_reset_req, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret != 0) { goto cleanup; } ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */ if (ret) { goto cleanup; } /* * At this point, all outstanding requests in the adapter * should have been flushed out and return to us */ cleanup: sema_destroy(&request->synch_sema); return (ret); } #endif /* HVS_HOST_RESET */ /** * @brief Function to initiate an I/O request * * @param device Hyper-V device pointer * @param request pointer to a request structure * @returns 0 on success, non-zero error on failure */ static int hv_storvsc_io_request(struct hv_device *device, struct hv_storvsc_request *request) { struct storvsc_softc *sc; struct vstor_packet *vstor_packet = &request->vstor_packet; struct hv_vmbus_channel* outgoing_channel = NULL; int ret = 0; sc = get_stor_device(device, TRUE); if (sc == NULL) { return ENODEV; } vstor_packet->flags |= REQUEST_COMPLETION_FLAG; vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE; vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size; vstor_packet->u.vm_srb.transfer_len = request->data_buf.length; vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB; outgoing_channel = vmbus_select_outgoing_channel(device->channel); mtx_unlock(&request->softc->hs_lock); if (request->data_buf.length) { ret = hv_vmbus_channel_send_packet_multipagebuffer( outgoing_channel, &request->data_buf, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request); } else { ret = hv_vmbus_channel_send_packet( outgoing_channel, vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request, HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); } mtx_lock(&request->softc->hs_lock); if (ret != 0) { printf("Unable to send packet %p ret %d", vstor_packet, ret); } else { atomic_add_int(&sc->hs_num_out_reqs, 1); } return (ret); } /** * Process IO_COMPLETION_OPERATION and ready * the result to be completed for upper layer * processing by the CAM layer. 
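* Along the way any autosense data reported by the host is copied into the request, and once the last outstanding request drains with hs_drain_notify set, hs_drain_sema is posted so that a pending detach can proceed.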
*/ static void hv_storvsc_on_iocompletion(struct storvsc_softc *sc, struct vstor_packet *vstor_packet, struct hv_storvsc_request *request) { struct vmscsi_req *vm_srb; vm_srb = &vstor_packet->u.vm_srb; if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) && (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) { /* Autosense data available */ KASSERT(vm_srb->sense_info_len <= request->sense_info_len, ("vm_srb->sense_info_len <= " "request->sense_info_len")); memcpy(request->sense_data, vm_srb->u.sense_data, vm_srb->sense_info_len); request->sense_info_len = vm_srb->sense_info_len; } /* Complete request by passing to the CAM layer */ storvsc_io_done(request); atomic_subtract_int(&sc->hs_num_out_reqs, 1); if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) { sema_post(&sc->hs_drain_sema); } } static void hv_storvsc_rescan_target(struct storvsc_softc *sc) { path_id_t pathid; target_id_t targetid; union ccb *ccb; pathid = cam_sim_path(sc->hs_sim); targetid = CAM_TARGET_WILDCARD; /* * Allocate a CCB and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("unable to create path for rescan, pathid: %u," "targetid: %u\n", pathid, targetid); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; xpt_rescan(ccb); } static void hv_storvsc_on_channel_callback(void *context) { int ret = 0; hv_vmbus_channel *channel = (hv_vmbus_channel *)context; struct hv_device *device = NULL; struct storvsc_softc *sc; uint32_t bytes_recvd; uint64_t request_id; uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)]; struct hv_storvsc_request *request; struct vstor_packet *vstor_packet; device = channel->device; KASSERT(device, ("device is NULL")); sc = get_stor_device(device, FALSE); if (sc == NULL) { printf("Storvsc_error: get stor device failed.\n"); return; } ret = hv_vmbus_channel_recv_packet( channel, packet, roundup2(VSTOR_PKT_SIZE, 8), &bytes_recvd, &request_id); while ((ret == 0) && (bytes_recvd > 0)) { request = (struct hv_storvsc_request *)(uintptr_t)request_id; if ((request == &sc->hs_init_req) || (request == &sc->hs_reset_req)) { memcpy(&request->vstor_packet, packet, sizeof(struct vstor_packet)); sema_post(&request->synch_sema); } else { vstor_packet = (struct vstor_packet *)packet; switch(vstor_packet->operation) { case VSTOR_OPERATION_COMPLETEIO: if (request == NULL) panic("VMBUS: storvsc received a " "packet with NULL request id in " "COMPLETEIO operation."); hv_storvsc_on_iocompletion(sc, vstor_packet, request); break; case VSTOR_OPERATION_REMOVEDEVICE: printf("VMBUS: storvsc operation %d not " "implemented.\n", vstor_packet->operation); /* TODO: implement */ break; case VSTOR_OPERATION_ENUMERATE_BUS: hv_storvsc_rescan_target(sc); break; default: break; } } ret = hv_vmbus_channel_recv_packet( channel, packet, roundup2(VSTOR_PKT_SIZE, 8), &bytes_recvd, &request_id); } } /** * @brief StorVSC probe function * * Device probe function. Returns 0 if the input device is a StorVSC * device. Otherwise, a ENXIO is returned. If the input device is * for BlkVSC (paravirtual IDE) device and this support is disabled in * favor of the emulated ATA/IDE device, return ENXIO. 
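* The emulated path is selected with the loader tunable hw.ata.disk_enable, which this probe checks via getenv_int().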
* * @param a device * @returns 0 on success, ENXIO if not a matching StorVSC device */ static int storvsc_probe(device_t dev) { int ata_disk_enable = 0; int ret = ENXIO; if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 || hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7) { sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); storvsc_current_major = STORVSC_WIN7_MAJOR; storvsc_current_minor = STORVSC_WIN7_MINOR; } else { sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; vmscsi_size_delta = 0; storvsc_current_major = STORVSC_WIN8_MAJOR; storvsc_current_minor = STORVSC_WIN8_MINOR; } switch (storvsc_get_storage_type(dev)) { case DRIVER_BLKVSC: if (bootverbose) device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n"); if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) { if (bootverbose) device_printf(dev, "Enlightened ATA/IDE detected\n"); ret = BUS_PROBE_DEFAULT; } else if (bootverbose) device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n"); break; case DRIVER_STORVSC: if (bootverbose) device_printf(dev, "Enlightened SCSI device detected\n"); ret = BUS_PROBE_DEFAULT; break; default: ret = ENXIO; } return (ret); } /** * @brief StorVSC attach function * * Function responsible for allocating per-device structures, * setting up CAM interfaces and scanning for available LUNs to * be used for SCSI device peripherals. * * @param a device * @returns 0 on success or an error on failure */ static int storvsc_attach(device_t dev) { struct hv_device *hv_dev = vmbus_get_devctx(dev); enum hv_storage_type stor_type; struct storvsc_softc *sc; struct cam_devq *devq; int ret, i, j; struct hv_storvsc_request *reqp; struct root_hold_token *root_mount_token = NULL; struct hv_sgl_node *sgl_node = NULL; void *tmp_buff = NULL; /* * We need to serialize storvsc attach calls.
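* A root mount hold is taken for the duration of the attach so the root file system cannot be mounted before this adapter is ready.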
*/ root_mount_token = root_mount_hold("storvsc"); sc = device_get_softc(dev); - if (sc == NULL) { - ret = ENOMEM; - goto cleanup; - } stor_type = storvsc_get_storage_type(dev); if (stor_type == DRIVER_UNKNOWN) { ret = ENODEV; goto cleanup; } bzero(sc, sizeof(struct storvsc_softc)); /* fill in driver specific properties */ sc->hs_drv_props = &g_drv_props_table[stor_type]; /* fill in device specific properties */ sc->hs_unit = device_get_unit(dev); sc->hs_dev = hv_dev; device_set_desc(dev, g_drv_props_table[stor_type].drv_desc); LIST_INIT(&sc->hs_free_list); mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF); for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) { reqp = malloc(sizeof(struct hv_storvsc_request), M_DEVBUF, M_WAITOK|M_ZERO); reqp->softc = sc; LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); } /* create sg-list page pool */ if (FALSE == g_hv_sgl_page_pool.is_init) { g_hv_sgl_page_pool.is_init = TRUE; LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list); LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list); /* * Pre-create SG list, each SG list with * HV_MAX_MULTIPAGE_BUFFER_COUNT segments, each * segment has one page buffer */ for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) { sgl_node = malloc(sizeof(struct hv_sgl_node), M_DEVBUF, M_WAITOK|M_ZERO); sgl_node->sgl_data = sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT, M_WAITOK|M_ZERO); for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) { tmp_buff = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK|M_ZERO); sgl_node->sgl_data->sg_segs[j].ss_paddr = (vm_paddr_t)tmp_buff; } LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); } } sc->hs_destroy = FALSE; sc->hs_drain_notify = FALSE; sc->hs_open_multi_channel = FALSE; sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema"); ret = hv_storvsc_connect_vsp(hv_dev); if (ret != 0) { goto cleanup; } /* * Create the device queue. * Hyper-V maps each target to one SCSI HBA */ devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target); if (devq == NULL) { device_printf(dev, "Failed to alloc device queue\n"); ret = ENOMEM; goto cleanup; } sc->hs_sim = cam_sim_alloc(storvsc_action, storvsc_poll, sc->hs_drv_props->drv_name, sc, sc->hs_unit, &sc->hs_lock, 1, sc->hs_drv_props->drv_max_ios_per_target, devq); if (sc->hs_sim == NULL) { device_printf(dev, "Failed to alloc sim\n"); cam_simq_free(devq); ret = ENOMEM; goto cleanup; } mtx_lock(&sc->hs_lock); /* bus_id is set to 0, need to get it from VMBUS channel query? 
*/ if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(sc->hs_sim, /*free_devq*/TRUE); mtx_unlock(&sc->hs_lock); device_printf(dev, "Unable to register SCSI bus\n"); ret = ENXIO; goto cleanup; } if (xpt_create_path(&sc->hs_path, /*periph*/NULL, cam_sim_path(sc->hs_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->hs_sim)); cam_sim_free(sc->hs_sim, /*free_devq*/TRUE); mtx_unlock(&sc->hs_lock); device_printf(dev, "Unable to create path\n"); ret = ENXIO; goto cleanup; } mtx_unlock(&sc->hs_lock); root_mount_rel(root_mount_token); return (0); cleanup: root_mount_rel(root_mount_token); while (!LIST_EMPTY(&sc->hs_free_list)) { reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); free(reqp, M_DEVBUF); } while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) { if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); } } sglist_free(sgl_node->sgl_data); free(sgl_node, M_DEVBUF); } return (ret); } /** * @brief StorVSC device detach function * * This function is responsible for safely detaching a * StorVSC device. This includes waiting for inbound responses * to complete and freeing associated per-device structures. * * @param dev a device * returns 0 on success */ static int storvsc_detach(device_t dev) { struct storvsc_softc *sc = device_get_softc(dev); struct hv_storvsc_request *reqp = NULL; struct hv_device *hv_device = vmbus_get_devctx(dev); struct hv_sgl_node *sgl_node = NULL; int j = 0; sc->hs_destroy = TRUE; /* * At this point, all outbound traffic should be disabled. We * only allow inbound traffic (responses) to proceed so that * outstanding requests can be completed. */ sc->hs_drain_notify = TRUE; sema_wait(&sc->hs_drain_sema); sc->hs_drain_notify = FALSE; /* * Since we have already drained, we don't need to busy wait. * The call to close the channel will reset the callback * under the protection of the incoming channel lock. */ hv_vmbus_channel_close(hv_device->channel); mtx_lock(&sc->hs_lock); while (!LIST_EMPTY(&sc->hs_free_list)) { reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); free(reqp, M_DEVBUF); } mtx_unlock(&sc->hs_lock); while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){ if (NULL != (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) { free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF); } } sglist_free(sgl_node->sgl_data); free(sgl_node, M_DEVBUF); } return (0); } #if HVS_TIMEOUT_TEST /** * @brief unit test for timed out operations * * This function provides unit testing capability to simulate * timed out operations. Recompilation with HV_TIMEOUT_TEST=1 * is required. 
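* (In this file the controlling macro is the HVS_TIMEOUT_TEST define near the top, which defaults to 0 and must be set to 1 before rebuilding.)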
* * @param reqp pointer to a request structure * @param opcode SCSI operation being performed * @param wait if 1, wait for I/O to complete */ static void storvsc_timeout_test(struct hv_storvsc_request *reqp, uint8_t opcode, int wait) { int ret; union ccb *ccb = reqp->ccb; struct storvsc_softc *sc = reqp->softc; if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) { return; } if (wait) { mtx_lock(&reqp->event.mtx); } ret = hv_storvsc_io_request(sc->hs_dev, reqp); if (ret != 0) { if (wait) { mtx_unlock(&reqp->event.mtx); } printf("%s: io_request failed with %d.\n", __func__, ret); ccb->ccb_h.status = CAM_PROVIDE_FAIL; mtx_lock(&sc->hs_lock); storvsc_free_request(sc, reqp); xpt_done(ccb); mtx_unlock(&sc->hs_lock); return; } if (wait) { xpt_print(ccb->ccb_h.path, "%u: %s: waiting for IO return.\n", ticks, __func__); ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz); mtx_unlock(&reqp->event.mtx); xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n", ticks, __func__, (ret == 0)? "IO return detected" : "IO return not detected"); /* * Now both the timer handler and io done are running * simultaneously. We want to confirm the io done always * finishes after the timer handler exits. So reqp used by * timer handler is not freed or stale. Do busy loop for * another 1/10 second to make sure io done does * wait for the timer handler to complete. */ DELAY(100*1000); mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: %s: finishing, queue frozen %d, " "ccb status 0x%x scsi_status 0x%x.\n", ticks, __func__, sc->hs_frozen, ccb->ccb_h.status, ccb->csio.scsi_status); mtx_unlock(&sc->hs_lock); } } #endif /* HVS_TIMEOUT_TEST */ /** * @brief timeout handler for requests * * This function is called as a result of a callout expiring. * * @param arg pointer to a request */ static void storvsc_timeout(void *arg) { struct hv_storvsc_request *reqp = arg; struct storvsc_softc *sc = reqp->softc; union ccb *ccb = reqp->ccb; if (reqp->retries == 0) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: IO timed out (req=0x%p), wait for another %u secs.\n", ticks, reqp, ccb->ccb_h.timeout / 1000); cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL); mtx_unlock(&sc->hs_lock); reqp->retries++; callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout, 0, storvsc_timeout, reqp, 0); #if HVS_TIMEOUT_TEST storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0); #endif return; } mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n", ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000, (sc->hs_frozen == 0)? "freezing the queue" : "the queue is already frozen"); if (sc->hs_frozen == 0) { sc->hs_frozen = 1; xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1); } mtx_unlock(&sc->hs_lock); #if HVS_TIMEOUT_TEST storvsc_timeout_test(reqp, MODE_SELECT_10, 1); #endif } /** * @brief StorVSC device poll function * * This function is responsible for servicing requests when * interrupts are disabled (i.e when we are dumping core.) * * @param sim a pointer to a CAM SCSI interface module */ static void storvsc_poll(struct cam_sim *sim) { struct storvsc_softc *sc = cam_sim_softc(sim); mtx_assert(&sc->hs_lock, MA_OWNED); mtx_unlock(&sc->hs_lock); hv_storvsc_on_channel_callback(sc->hs_dev->channel); mtx_lock(&sc->hs_lock); } /** * @brief StorVSC device action function * * This function is responsible for handling SCSI operations which * are passed from the CAM layer. The requests are in the form of * CAM control blocks which indicate the action being performed. 
* Not all actions require converting the request to a VSCSI protocol * message - these actions can be responded to by this driver. * Requests which are destined for a backend storage device are converted * to a VSCSI protocol message and sent on the channel connection associated * with this device. * * @param sim pointer to a CAM SCSI interface module * @param ccb pointer to a CAM control block */ static void storvsc_action(struct cam_sim *sim, union ccb *ccb) { struct storvsc_softc *sc = cam_sim_softc(sim); int res; mtx_assert(&sc->hs_lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = STORVSC_MAX_TARGETS; cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target; cpi->initiator_id = cpi->max_target; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 300000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; cts->transport = XPORT_SAS; cts->transport_version = 0; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; /* enable tag queuing and disconnected mode */ cts->proto_specific.valid = CTS_SCSI_VALID_TQ; cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; cts->xport_specific.valid = CTS_SPI_VALID_DISC; cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } case XPT_CALC_GEOMETRY:{ cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); return; } case XPT_RESET_BUS: case XPT_RESET_DEV:{ #if HVS_HOST_RESET if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) { xpt_print(ccb->ccb_h.path, "hv_storvsc_host_reset failed with %d\n", res); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; #else xpt_print(ccb->ccb_h.path, "%s reset not supported.\n", (ccb->ccb_h.func_code == XPT_RESET_BUS)? 
"bus" : "dev"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; #endif /* HVS_HOST_RESET */ } case XPT_SCSI_IO: case XPT_IMMED_NOTIFY: { struct hv_storvsc_request *reqp = NULL; if (ccb->csio.cdb_len == 0) { panic("cdl_len is 0\n"); } if (LIST_EMPTY(&sc->hs_free_list)) { ccb->ccb_h.status = CAM_REQUEUE_REQ; if (sc->hs_frozen == 0) { sc->hs_frozen = 1; xpt_freeze_simq(sim, /* count*/1); } xpt_done(ccb); return; } reqp = LIST_FIRST(&sc->hs_free_list); LIST_REMOVE(reqp, link); bzero(reqp, sizeof(struct hv_storvsc_request)); reqp->softc = sc; ccb->ccb_h.status |= CAM_SIM_QUEUED; if ((res = create_storvsc_request(ccb, reqp)) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_init(&reqp->callout, 1); callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout, 0, storvsc_timeout, reqp, 0); #if HVS_TIMEOUT_TEST cv_init(&reqp->event.cv, "storvsc timeout cv"); mtx_init(&reqp->event.mtx, "storvsc timeout mutex", NULL, MTX_DEF); switch (reqp->vstor_packet.vm_srb.cdb[0]) { case MODE_SELECT_10: case SEND_DIAGNOSTIC: /* To have timer send the request. */ return; default: break; } #endif /* HVS_TIMEOUT_TEST */ } if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) { xpt_print(ccb->ccb_h.path, "hv_storvsc_io_request failed with %d\n", res); ccb->ccb_h.status = CAM_PROVIDE_FAIL; storvsc_free_request(sc, reqp); xpt_done(ccb); return; } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } } /** * @brief destroy bounce buffer * * This function is responsible for destroy a Scatter/Gather list * that create by storvsc_create_bounce_buffer() * * @param sgl- the Scatter/Gather need be destroy * @param sg_count- page count of the SG list. * */ static void storvsc_destroy_bounce_buffer(struct sglist *sgl) { struct hv_sgl_node *sgl_node = NULL; if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) { printf("storvsc error: not enough in use sgl\n"); return; } sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list); LIST_REMOVE(sgl_node, link); sgl_node->sgl_data = sgl; LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link); } /** * @brief create bounce buffer * * This function is responsible for create a Scatter/Gather list, * which hold several pages that can be aligned with page size. * * @param seg_count- SG-list segments count * @param write - if WRITE_TYPE, set SG list page used size to 0, * otherwise set used size to page size. * * return NULL if create failed */ static struct sglist * storvsc_create_bounce_buffer(uint16_t seg_count, int write) { int i = 0; struct sglist *bounce_sgl = NULL; unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE); struct hv_sgl_node *sgl_node = NULL; /* get struct sglist from free_sgl_list */ if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) { printf("storvsc error: not enough free sgl\n"); return NULL; } sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list); LIST_REMOVE(sgl_node, link); bounce_sgl = sgl_node->sgl_data; LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link); bounce_sgl->sg_maxseg = seg_count; if (write == WRITE_TYPE) bounce_sgl->sg_nseg = 0; else bounce_sgl->sg_nseg = seg_count; for (i = 0; i < seg_count; i++) bounce_sgl->sg_segs[i].ss_len = buf_len; return bounce_sgl; } /** * @brief copy data from SG list to bounce buffer * * This function is responsible for copy data from one SG list's segments * to another SG list which used as bounce buffer. 
* * @param bounce_sgl - the destination SG list * @param orig_sgl - the segments of the source SG list. * @param orig_sgl_count - the count of segments. * @param seg_bits - bitmask indicating which segments need the bounce * buffer; a set bit means the segment does. * */ static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl, bus_dma_segment_t *orig_sgl, unsigned int orig_sgl_count, uint64_t seg_bits) { int src_sgl_idx = 0; for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) { if (seg_bits & (1 << src_sgl_idx)) { memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr, (void*)orig_sgl[src_sgl_idx].ds_addr, orig_sgl[src_sgl_idx].ds_len); bounce_sgl->sg_segs[src_sgl_idx].ss_len = orig_sgl[src_sgl_idx].ds_len; } } } /** * @brief copy data from an SG list used as bounce buffer to another SG list * * This function is responsible for copying data from an SG list backed by * the bounce buffer to another SG list's segments. * * @param dest_sgl - the destination SG list's segments * @param dest_sgl_count - the count of the destination SG list's segments. * @param src_sgl - the source SG list. * @param seg_bits - indicates which segments of the source SG list used the * bounce buffer. * */ void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl, unsigned int dest_sgl_count, struct sglist* src_sgl, uint64_t seg_bits) { int sgl_idx = 0; for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) { if (seg_bits & (1 << sgl_idx)) { memcpy((void*)(dest_sgl[sgl_idx].ds_addr), (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr), src_sgl->sg_segs[sgl_idx].ss_len); } } } /** * @brief check whether an SG list needs a bounce buffer * * This function is responsible for checking whether a bounce buffer is * needed for the SG list. * * @param sgl - the SG list's segments * @param sg_count - the count of the SG list's segments. * @param bits - bitmask of the segments that need the bounce buffer * * return -1 if the SG list does not need a bounce buffer */ static int storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl, unsigned int sg_count, uint64_t *bits) { int i = 0; int offset = 0; uint64_t phys_addr = 0; uint64_t tmp_bits = 0; boolean_t found_hole = FALSE; boolean_t pre_aligned = TRUE; if (sg_count < 2) { return -1; } *bits = 0; phys_addr = vtophys(sgl[0].ds_addr); offset = phys_addr - trunc_page(phys_addr); if (offset != 0) { pre_aligned = FALSE; tmp_bits |= 1; } for (i = 1; i < sg_count; i++) { phys_addr = vtophys(sgl[i].ds_addr); offset = phys_addr - trunc_page(phys_addr); if (offset == 0) { if (FALSE == pre_aligned) { /* * This segment is aligned; if the previous * one is not aligned, we have found a hole */ found_hole = TRUE; } pre_aligned = TRUE; } else { tmp_bits |= 1 << i; if (!pre_aligned) { if (phys_addr != vtophys(sgl[i-1].ds_addr + sgl[i-1].ds_len)) { /* * Check whether it connects to the previous * segment; if not, we have found the hole */ found_hole = TRUE; } } else { found_hole = TRUE; } pre_aligned = FALSE; } } if (!found_hole) { return (-1); } else { *bits = tmp_bits; return 0; } } /** * @brief Fill in a request structure based on a CAM control block * * Fills in a request structure based on the contents of a CAM control * block. The request structure holds the payload information for a * VSCSI protocol request.
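* Three data paths are handled below: CAM_DATA_VADDR buffers are walked page by page into the multipage PFN array, while CAM_DATA_SG lists are mapped directly when every middle segment is page aligned and staged through a pre-allocated bounce SG list otherwise.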
* * @param ccb pointer to a CAM control block * @param reqp pointer to a request structure */ static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp) { struct ccb_scsiio *csio = &ccb->csio; uint64_t phys_addr; uint32_t bytes_to_copy = 0; uint32_t pfn_num = 0; uint32_t pfn; uint64_t not_aligned_seg_bits = 0; /* refer to struct vmscsi_req for meanings of these two fields */ reqp->vstor_packet.u.vm_srb.port = cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)); reqp->vstor_packet.u.vm_srb.path_id = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id; reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun; reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len; if (ccb->ccb_h.flags & CAM_CDB_POINTER) { memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr, csio->cdb_len); } else { memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes, csio->cdb_len); } switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_OUT: reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE; break; case CAM_DIR_IN: reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE; break; case CAM_DIR_NONE: reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; break; default: reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE; break; } reqp->sense_data = &csio->sense_data; reqp->sense_info_len = csio->sense_len; reqp->ccb = ccb; if (0 == csio->dxfer_len) { return (0); } reqp->data_buf.length = csio->dxfer_len; switch (ccb->ccb_h.flags & CAM_DATA_MASK) { case CAM_DATA_VADDR: { bytes_to_copy = csio->dxfer_len; phys_addr = vtophys(csio->data_ptr); reqp->data_buf.offset = phys_addr & PAGE_MASK; while (bytes_to_copy != 0) { int bytes, page_offset; phys_addr = vtophys(&csio->data_ptr[reqp->data_buf.length - bytes_to_copy]); pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[pfn_num] = pfn; page_offset = phys_addr & PAGE_MASK; bytes = min(PAGE_SIZE - page_offset, bytes_to_copy); bytes_to_copy -= bytes; pfn_num++; } break; } case CAM_DATA_SG: { int i = 0; int offset = 0; int ret; bus_dma_segment_t *storvsc_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt; printf("Storvsc: get SG I/O operation, %d\n", reqp->vstor_packet.u.vm_srb.data_in); if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT) { printf("Storvsc: %d segments are too many, " "only %d segments are supported\n", storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT); return (EINVAL); } /* * We create our own bounce buffer function currently. Ideally * we should use the BUS_DMA(9) framework. But with the current * BUS_DMA code there is no callback API to check the page * alignment of middle segments before busdma can decide if a * bounce buffer is needed for a particular segment. There is * a callback, "bus_dma_filter_t *filter", but the parameters * are not sufficient for the storvsc driver. * TODO: * Add a page alignment check in the BUS_DMA(9) callback. Once * this is complete, switch the following code to use * BUS_DMA(9) for storvsc bounce buffer support.
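* (For reference, the filter hook is only handed a candidate physical address, roughly typedef int bus_dma_filter_t(void *arg, bus_addr_t paddr); so a middle segment's length, and hence whether it straddles a page boundary, is not visible to it.)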
*/ /* check if we need to create bounce buffer */ ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist, storvsc_sg_count, &not_aligned_seg_bits); if (ret != -1) { reqp->bounce_sgl = storvsc_create_bounce_buffer(storvsc_sg_count, reqp->vstor_packet.u.vm_srb.data_in); if (NULL == reqp->bounce_sgl) { printf("Storvsc_error: " "create bounce buffer failed.\n"); return (ENOMEM); } reqp->bounce_sgl_count = storvsc_sg_count; reqp->not_aligned_seg_bits = not_aligned_seg_bits; /* * if it is a write, we need to copy the original data * to the bounce buffer */ if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { storvsc_copy_sgl_to_bounce_buf( reqp->bounce_sgl, storvsc_sglist, storvsc_sg_count, reqp->not_aligned_seg_bits); } /* translate virtual addresses to physical frame numbers */ if (reqp->not_aligned_seg_bits & 0x1) { phys_addr = vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr); } else { phys_addr = vtophys(storvsc_sglist[0].ds_addr); } reqp->data_buf.offset = phys_addr & PAGE_MASK; pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[0] = pfn; for (i = 1; i < storvsc_sg_count; i++) { if (reqp->not_aligned_seg_bits & (1 << i)) { phys_addr = vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr); } else { phys_addr = vtophys(storvsc_sglist[i].ds_addr); } pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[i] = pfn; } } else { phys_addr = vtophys(storvsc_sglist[0].ds_addr); reqp->data_buf.offset = phys_addr & PAGE_MASK; for (i = 0; i < storvsc_sg_count; i++) { phys_addr = vtophys(storvsc_sglist[i].ds_addr); pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[i] = pfn; } /* check whether the last segment crosses a page boundary */ offset = phys_addr & PAGE_MASK; if (offset) { phys_addr = vtophys(storvsc_sglist[i-1].ds_addr + PAGE_SIZE - offset); pfn = phys_addr >> PAGE_SHIFT; reqp->data_buf.pfn_array[i] = pfn; } reqp->bounce_sgl_count = 0; } break; } default: printf("Unknown flags: %d\n", ccb->ccb_h.flags); return (EINVAL); } return (0); } /* * Modeled on scsi_print_inquiry(), which prints the detailed * information in scsi_inquiry_data. * * Return 1 if it is valid, 0 otherwise. */ static inline int is_inquiry_valid(const struct scsi_inquiry_data *inq_data) { uint8_t type; char vendor[16], product[48], revision[16]; /* * Check device type and qualifier */ if (!(SID_QUAL_IS_VENDOR_UNIQUE(inq_data) || SID_QUAL(inq_data) == SID_QUAL_LU_CONNECTED)) return (0); type = SID_TYPE(inq_data); switch (type) { case T_DIRECT: case T_SEQUENTIAL: case T_PRINTER: case T_PROCESSOR: case T_WORM: case T_CDROM: case T_SCANNER: case T_OPTICAL: case T_CHANGER: case T_COMM: case T_STORARRAY: case T_ENCLOSURE: case T_RBC: case T_OCRW: case T_OSD: case T_ADC: break; case T_NODEVICE: default: return (0); } /* * Check vendor, product, and revision */ cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor), sizeof(vendor)); cam_strvis(product, inq_data->product, sizeof(inq_data->product), sizeof(product)); cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision), sizeof(revision)); if (strlen(vendor) == 0 || strlen(product) == 0 || strlen(revision) == 0) return (0); return (1); } /** * @brief completion function before returning to CAM * * The I/O has been completed and the result needs * to be passed to the CAM layer. * Resources related to this request are freed.
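* This is also where a SIM queue frozen by the timeout handler is released (CAM_RELEASE_SIMQ) and where the request is returned to hs_free_list via storvsc_free_request().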
* * @param reqp pointer to a request structure */ static void storvsc_io_done(struct hv_storvsc_request *reqp) { union ccb *ccb = reqp->ccb; struct ccb_scsiio *csio = &ccb->csio; struct storvsc_softc *sc = reqp->softc; struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb; bus_dma_segment_t *ori_sglist = NULL; int ori_sg_count = 0; /* destroy bounce buffer if it is used */ if (reqp->bounce_sgl_count) { ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; ori_sg_count = ccb->csio.sglist_cnt; /* * If it is READ operation, we should copy back the data * to original SG list. */ if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) { storvsc_copy_from_bounce_buf_to_sgl(ori_sglist, ori_sg_count, reqp->bounce_sgl, reqp->not_aligned_seg_bits); } storvsc_destroy_bounce_buffer(reqp->bounce_sgl); reqp->bounce_sgl_count = 0; } if (reqp->retries > 0) { mtx_lock(&sc->hs_lock); #if HVS_TIMEOUT_TEST xpt_print(ccb->ccb_h.path, "%u: IO returned after timeout, " "waking up timer handler if any.\n", ticks); mtx_lock(&reqp->event.mtx); cv_signal(&reqp->event.cv); mtx_unlock(&reqp->event.mtx); #endif reqp->retries = 0; xpt_print(ccb->ccb_h.path, "%u: IO returned after timeout, " "stopping timer if any.\n", ticks); mtx_unlock(&sc->hs_lock); } /* * callout_drain() will wait for the timer handler to finish * if it is running. So we don't need any lock to synchronize * between this routine and the timer handler. * Note that we need to make sure reqp is not freed when timer * handler is using or will use it. */ if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { callout_drain(&reqp->callout); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (vm_srb->scsi_status == SCSI_STATUS_OK) { const struct scsi_generic *cmd; /* * Check whether the data for INQUIRY cmd is valid or * not. Windows 10 and Windows 2016 send all zero * inquiry data to VM even for unpopulated slots. */ cmd = (const struct scsi_generic *) ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes); if (cmd->opcode == INQUIRY && is_inquiry_valid( (const struct scsi_inquiry_data *)csio->data_ptr) == 0) { ccb->ccb_h.status |= CAM_DEV_NOT_THERE; if (bootverbose) { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc uninstalled device\n"); mtx_unlock(&sc->hs_lock); } } else { ccb->ccb_h.status |= CAM_REQ_CMP; } } else { mtx_lock(&sc->hs_lock); xpt_print(ccb->ccb_h.path, "storvsc scsi_status = %d\n", vm_srb->scsi_status); mtx_unlock(&sc->hs_lock); ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF); ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len; if (reqp->sense_info_len != 0) { csio->sense_resid = csio->sense_len - reqp->sense_info_len; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } mtx_lock(&sc->hs_lock); if (reqp->softc->hs_frozen == 1) { xpt_print(ccb->ccb_h.path, "%u: storvsc unfreezing softc 0x%p.\n", ticks, reqp->softc); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; reqp->softc->hs_frozen = 0; } storvsc_free_request(sc, reqp); xpt_done(ccb); mtx_unlock(&sc->hs_lock); } /** * @brief Free a request structure * * Free a request structure by returning it to the free list * * @param sc pointer to a softc * @param reqp pointer to a request structure */ static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp) { LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link); } /** * @brief Determine type of storage device from GUID * * Using the type GUID, determine if this is a StorVSC (paravirtual * SCSI or BlkVSC (paravirtual IDE) device. * * @param dev a device * returns an enum */ static enum hv_storage_type storvsc_get_storage_type(device_t dev) { const char *p = vmbus_get_type(dev); if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) { return DRIVER_BLKVSC; } else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) { return DRIVER_STORVSC; } return (DRIVER_UNKNOWN); } Index: projects/release-pkg/sys/dev/hyperv/vmbus/hv_connection.c =================================================================== --- projects/release-pkg/sys/dev/hyperv/vmbus/hv_connection.c (revision 297926) +++ projects/release-pkg/sys/dev/hyperv/vmbus/hv_connection.c (revision 297927) @@ -1,414 +1,415 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include "hv_vmbus_priv.h" /* * Globals */ hv_vmbus_connection hv_vmbus_g_connection = { .connect_state = HV_DISCONNECTED, .next_gpadl_handle = 0xE1E10, }; uint32_t hv_vmbus_protocal_version = HV_VMBUS_VERSION_WS2008; static uint32_t hv_vmbus_get_next_version(uint32_t current_ver) { switch (current_ver) { case (HV_VMBUS_VERSION_WIN7): return(HV_VMBUS_VERSION_WS2008); case (HV_VMBUS_VERSION_WIN8): return(HV_VMBUS_VERSION_WIN7); case (HV_VMBUS_VERSION_WIN8_1): return(HV_VMBUS_VERSION_WIN8); case (HV_VMBUS_VERSION_WS2008): default: return(HV_VMBUS_VERSION_INVALID); } } /** * Negotiate the highest supported hypervisor version. */ static int hv_vmbus_negotiate_version(hv_vmbus_channel_msg_info *msg_info, uint32_t version) { int ret = 0; hv_vmbus_channel_initiate_contact *msg; sema_init(&msg_info->wait_sema, 0, "Msg Info Sema"); msg = (hv_vmbus_channel_initiate_contact*) msg_info->msg; msg->header.message_type = HV_CHANNEL_MESSAGE_INITIATED_CONTACT; msg->vmbus_version_requested = version; msg->interrupt_page = hv_get_phys_addr( hv_vmbus_g_connection.interrupt_page); msg->monitor_page_1 = hv_get_phys_addr( hv_vmbus_g_connection.monitor_page_1); msg->monitor_page_2 = hv_get_phys_addr( hv_vmbus_g_connection.monitor_page_2); /** * Add to list before we send the request since we may receive the * response before returning from this routine */ mtx_lock(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_INSERT_TAIL( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock); ret = hv_vmbus_post_message( msg, sizeof(hv_vmbus_channel_initiate_contact)); if (ret != 0) { mtx_lock(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock); return (ret); } /** * Wait for the connection response */ ret = sema_timedwait(&msg_info->wait_sema, 5 * hz); /* KYS 5 seconds */ mtx_lock(&hv_vmbus_g_connection.channel_msg_lock); TAILQ_REMOVE( &hv_vmbus_g_connection.channel_msg_anchor, msg_info, msg_list_entry); mtx_unlock(&hv_vmbus_g_connection.channel_msg_lock); /** * Check if successful */ if (msg_info->response.version_response.version_supported) { hv_vmbus_g_connection.connect_state = HV_CONNECTED; } else { ret = ECONNREFUSED; } return (ret); } /** * Send a connect request on the partition service connection */ int hv_vmbus_connect(void) { int ret = 0; uint32_t version; hv_vmbus_channel_msg_info* msg_info = NULL; /** * Make sure we are not connecting or connected */ if (hv_vmbus_g_connection.connect_state != HV_DISCONNECTED) { return (-1); } /** * Initialize the vmbus connection */ hv_vmbus_g_connection.connect_state = HV_CONNECTING; TAILQ_INIT(&hv_vmbus_g_connection.channel_msg_anchor); mtx_init(&hv_vmbus_g_connection.channel_msg_lock, "vmbus channel msg", NULL, MTX_DEF); TAILQ_INIT(&hv_vmbus_g_connection.channel_anchor); 
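/* * The negotiation further below starts at HV_VMBUS_VERSION_CURRENT and, * on rejection, steps down through the versions returned by * hv_vmbus_get_next_version() above (WIN8_1 -> WIN8 -> WIN7 -> WS2008) * until the host accepts one or the list is exhausted. */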
mtx_init(&hv_vmbus_g_connection.channel_lock, "vmbus channel", NULL, MTX_DEF); /** * Setup the vmbus event connection for channel interrupt abstraction * stuff */ hv_vmbus_g_connection.interrupt_page = malloc( PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); hv_vmbus_g_connection.recv_interrupt_page = hv_vmbus_g_connection.interrupt_page; hv_vmbus_g_connection.send_interrupt_page = ((uint8_t *) hv_vmbus_g_connection.interrupt_page + (PAGE_SIZE >> 1)); /** * Set up the monitor notification facility. The 1st page for * parent->child and the 2nd page for child->parent */ hv_vmbus_g_connection.monitor_page_1 = malloc( PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); hv_vmbus_g_connection.monitor_page_2 = malloc( PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); msg_info = (hv_vmbus_channel_msg_info*) malloc(sizeof(hv_vmbus_channel_msg_info) + sizeof(hv_vmbus_channel_initiate_contact), M_DEVBUF, M_WAITOK | M_ZERO); hv_vmbus_g_connection.channels = malloc(sizeof(hv_vmbus_channel*) * HV_CHANNEL_MAX_COUNT, M_DEVBUF, M_WAITOK | M_ZERO); /* * Find the highest vmbus version number we can support. */ version = HV_VMBUS_VERSION_CURRENT; do { ret = hv_vmbus_negotiate_version(msg_info, version); if (ret == EWOULDBLOCK) { /* * We timed out. */ goto cleanup; } if (hv_vmbus_g_connection.connect_state == HV_CONNECTED) break; version = hv_vmbus_get_next_version(version); } while (version != HV_VMBUS_VERSION_INVALID); hv_vmbus_protocal_version = version; if (bootverbose) printf("VMBUS: Protocol Version: %d.%d\n", version >> 16, version & 0xFFFF); sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); return (0); /* * Cleanup after failure! */ cleanup: hv_vmbus_g_connection.connect_state = HV_DISCONNECTED; mtx_destroy(&hv_vmbus_g_connection.channel_lock); mtx_destroy(&hv_vmbus_g_connection.channel_msg_lock); if (hv_vmbus_g_connection.interrupt_page != NULL) { free(hv_vmbus_g_connection.interrupt_page, M_DEVBUF); hv_vmbus_g_connection.interrupt_page = NULL; } free(hv_vmbus_g_connection.monitor_page_1, M_DEVBUF); free(hv_vmbus_g_connection.monitor_page_2, M_DEVBUF); if (msg_info) { sema_destroy(&msg_info->wait_sema); free(msg_info, M_DEVBUF); } free(hv_vmbus_g_connection.channels, M_DEVBUF); return (ret); } /** * Send a disconnect request on the partition service connection */ int hv_vmbus_disconnect(void) { int ret = 0; hv_vmbus_channel_unload msg; msg.message_type = HV_CHANNEL_MESSAGE_UNLOAD; ret = hv_vmbus_post_message(&msg, sizeof(hv_vmbus_channel_unload)); free(hv_vmbus_g_connection.interrupt_page, M_DEVBUF); mtx_destroy(&hv_vmbus_g_connection.channel_msg_lock); free(hv_vmbus_g_connection.channels, M_DEVBUF); hv_vmbus_g_connection.connect_state = HV_DISCONNECTED; return (ret); } /** * Handler for events */ void hv_vmbus_on_events(int cpu) { int bit; int dword; void *page_addr; uint32_t* recv_interrupt_page = NULL; int rel_id; int maxdword; hv_vmbus_synic_event_flags *event; /* int maxdword = PAGE_SIZE >> 3; */ KASSERT(cpu <= mp_maxid, ("VMBUS: hv_vmbus_on_events: " "cpu out of range!")); + page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu]; + event = (hv_vmbus_synic_event_flags *) + page_addr + HV_VMBUS_MESSAGE_SINT; if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) || (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) { maxdword = HV_MAX_NUM_CHANNELS_SUPPORTED >> 5; /* * receive size is 1/2 page and divide that by 4 bytes */ - recv_interrupt_page = - hv_vmbus_g_connection.recv_interrupt_page; + if (synch_test_and_clear_bit(0, &event->flags32[0])) + recv_interrupt_page = + 
hv_vmbus_g_connection.recv_interrupt_page; } else { /* * On Host with Win8 or above, the event page can be * checked directly to get the id of the channel * that has the pending interrupt. */ maxdword = HV_EVENT_FLAGS_DWORD_COUNT; - page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu]; - event = (hv_vmbus_synic_event_flags *) - page_addr + HV_VMBUS_MESSAGE_SINT; recv_interrupt_page = event->flags32; } /* * Check events */ if (recv_interrupt_page != NULL) { for (dword = 0; dword < maxdword; dword++) { if (recv_interrupt_page[dword]) { for (bit = 0; bit < HV_CHANNEL_DWORD_LEN; bit++) { if (synch_test_and_clear_bit(bit, (uint32_t *) &recv_interrupt_page[dword])) { rel_id = (dword << 5) + bit; if (rel_id == 0) { /* * Special case - * vmbus channel protocol msg. */ continue; } else { hv_vmbus_channel * channel = hv_vmbus_g_connection.channels[rel_id]; /* if channel is closed or closing */ if (channel == NULL || channel->rxq == NULL) continue; if (channel->batched_reading) hv_ring_buffer_read_begin(&channel->inbound); taskqueue_enqueue(channel->rxq, &channel->channel_task); } } } } } } return; } /** * Send a msg on the vmbus's message connection */ int hv_vmbus_post_message(void *buffer, size_t bufferLen) { hv_vmbus_connection_id connId; sbintime_t time = SBT_1MS; int retries; int ret; connId.as_uint32_t = 0; connId.u.id = HV_VMBUS_MESSAGE_CONNECTION_ID; /* * We retry to cope with transient failures caused by host side's * insufficient resources. 20 times should suffice in practice. */ for (retries = 0; retries < 20; retries++) { ret = hv_vmbus_post_msg_via_msg_ipc(connId, 1, buffer, bufferLen); if (ret == HV_STATUS_SUCCESS) return (0); pause_sbt("pstmsg", time, 0, C_HARDCLOCK); if (time < SBT_1S * 2) time *= 2; } KASSERT(ret == HV_STATUS_SUCCESS, ("Error VMBUS: Message Post Failed, ret=%d\n", ret)); return (EAGAIN); } /** * Send an event notification to the parent */ int hv_vmbus_set_event(hv_vmbus_channel *channel) { int ret = 0; uint32_t child_rel_id = channel->offer_msg.child_rel_id; /* Each uint32_t represents 32 channels */ synch_set_bit(child_rel_id & 31, (((uint32_t *)hv_vmbus_g_connection.send_interrupt_page + (child_rel_id >> 5)))); ret = hv_vmbus_signal_event(channel->signal_event_param); return (ret); } Index: projects/release-pkg/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c =================================================================== --- projects/release-pkg/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c (revision 297926) +++ projects/release-pkg/sys/dev/hyperv/vmbus/hv_vmbus_drv_freebsd.c (revision 297927) @@ -1,676 +1,656 @@ /*- * Copyright (c) 2009-2012 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * VM Bus Driver Implementation */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "hv_vmbus_priv.h" #include #include "acpi_if.h" static device_t vmbus_devp; static int vmbus_inited; static hv_setup_args setup_args; /* only CPU 0 supported at this time */ static char *vmbus_ids[] = { "VMBUS", NULL }; /** * @brief Software interrupt thread routine to handle channel messages from * the hypervisor. */ static void vmbus_msg_swintr(void *arg, int pending __unused) { int cpu; void* page_addr; hv_vmbus_channel_msg_header *hdr; hv_vmbus_channel_msg_table_entry *entry; hv_vmbus_channel_msg_type msg_type; hv_vmbus_message* msg; cpu = (int)(long)arg; KASSERT(cpu <= mp_maxid, ("VMBUS: vmbus_msg_swintr: " "cpu out of range!")); page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu]; msg = (hv_vmbus_message*) page_addr + HV_VMBUS_MESSAGE_SINT; for (;;) { if (msg->header.message_type == HV_MESSAGE_TYPE_NONE) break; /* no message */ hdr = (hv_vmbus_channel_msg_header *)msg->u.payload; msg_type = hdr->message_type; if (msg_type >= HV_CHANNEL_MESSAGE_COUNT) { printf("VMBUS: unknown message type = %d\n", msg_type); goto handled; } entry = &g_channel_message_table[msg_type]; if (entry->messageHandler) entry->messageHandler(hdr); handled: msg->header.message_type = HV_MESSAGE_TYPE_NONE; /* * Make sure the write to message_type (ie set to * HV_MESSAGE_TYPE_NONE) happens before we read the * message_pending and EOMing. Otherwise, the EOMing will * not deliver any more messages * since there is no empty slot * * NOTE: * mb() is used here, since atomic_thread_fence_seq_cst() * will become compiler fence on UP kernel. */ mb(); if (msg->header.message_flags.u.message_pending) { /* * This will cause message queue rescan to possibly * deliver another msg from the hypervisor */ wrmsr(HV_X64_MSR_EOM, 0); } } } /** * @brief Interrupt filter routine for VMBUS. * * The purpose of this routine is to determine the type of VMBUS protocol * message to process - an event or a channel message. */ static inline int hv_vmbus_isr(struct trapframe *frame) { int cpu; hv_vmbus_message* msg; - hv_vmbus_synic_event_flags* event; void* page_addr; cpu = PCPU_GET(cpuid); /* * The Windows team has advised that we check for events * before checking for messages. This is the way they do it * in Windows when running as a guest in Hyper-V */ - page_addr = hv_vmbus_g_context.syn_ic_event_page[cpu]; - event = (hv_vmbus_synic_event_flags*) - page_addr + HV_VMBUS_MESSAGE_SINT; - - if ((hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008) || - (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)) { - /* Since we are a child, we only need to check bit 0 */ - if (synch_test_and_clear_bit(0, &event->flags32[0])) { - hv_vmbus_on_events(cpu); - } - } else { - /* - * On host with Win8 or above, we can directly look at - * the event page. 
If bit n is set, we have an interrupt - * on the channel with id n. - * Directly schedule the event software interrupt on - * current cpu. - */ - hv_vmbus_on_events(cpu); - } + hv_vmbus_on_events(cpu); /* Check if there are actual msgs to be processed */ page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu]; msg = (hv_vmbus_message*) page_addr + HV_VMBUS_TIMER_SINT; /* let the event timer code process the message */ if (msg->header.message_type == HV_MESSAGE_TIMER_EXPIRED) { msg->header.message_type = HV_MESSAGE_TYPE_NONE; /* call interrupt handler of event timer */ hv_et_intr(frame); /* * Make sure the write to message_type (ie set to * HV_MESSAGE_TYPE_NONE) happens before we read the * message_pending and EOMing. Otherwise, the EOMing will * not deliver any more messages * since there is no empty slot * * NOTE: * mb() is used here, since atomic_thread_fence_seq_cst() * will become compiler fence on UP kernel. */ mb(); if (msg->header.message_flags.u.message_pending) { /* * This will cause message queue rescan to possibly * deliver another msg from the hypervisor */ wrmsr(HV_X64_MSR_EOM, 0); } } msg = (hv_vmbus_message*) page_addr + HV_VMBUS_MESSAGE_SINT; if (msg->header.message_type != HV_MESSAGE_TYPE_NONE) { taskqueue_enqueue(hv_vmbus_g_context.hv_msg_tq[cpu], &hv_vmbus_g_context.hv_msg_task[cpu]); } return (FILTER_HANDLED); } u_long *hv_vmbus_intr_cpu[MAXCPU]; void hv_vector_handler(struct trapframe *trap_frame) { int cpu; /* * Disable preemption. */ critical_enter(); /* * Do a little interrupt counting. */ cpu = PCPU_GET(cpuid); (*hv_vmbus_intr_cpu[cpu])++; hv_vmbus_isr(trap_frame); /* * Enable preemption. */ critical_exit(); } static int vmbus_read_ivar( device_t dev, device_t child, int index, uintptr_t* result) { struct hv_device *child_dev_ctx = device_get_ivars(child); switch (index) { case HV_VMBUS_IVAR_TYPE: *result = (uintptr_t) &child_dev_ctx->class_id; return (0); case HV_VMBUS_IVAR_INSTANCE: *result = (uintptr_t) &child_dev_ctx->device_id; return (0); case HV_VMBUS_IVAR_DEVCTX: *result = (uintptr_t) child_dev_ctx; return (0); case HV_VMBUS_IVAR_NODE: *result = (uintptr_t) child_dev_ctx->device; return (0); } return (ENOENT); } static int vmbus_write_ivar( device_t dev, device_t child, int index, uintptr_t value) { switch (index) { case HV_VMBUS_IVAR_TYPE: case HV_VMBUS_IVAR_INSTANCE: case HV_VMBUS_IVAR_DEVCTX: case HV_VMBUS_IVAR_NODE: /* read-only */ return (EINVAL); } return (ENOENT); } static int vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) { char guidbuf[40]; struct hv_device *dev_ctx = device_get_ivars(child); strlcat(buf, "classid=", buflen); snprintf_hv_guid(guidbuf, sizeof(guidbuf), &dev_ctx->class_id); strlcat(buf, guidbuf, buflen); strlcat(buf, " deviceid=", buflen); snprintf_hv_guid(guidbuf, sizeof(guidbuf), &dev_ctx->device_id); strlcat(buf, guidbuf, buflen); return (0); } struct hv_device* hv_vmbus_child_device_create( hv_guid type, hv_guid instance, hv_vmbus_channel* channel) { hv_device* child_dev; /* * Allocate the new child device */ child_dev = malloc(sizeof(hv_device), M_DEVBUF, M_WAITOK | M_ZERO); child_dev->channel = channel; memcpy(&child_dev->class_id, &type, sizeof(hv_guid)); memcpy(&child_dev->device_id, &instance, sizeof(hv_guid)); return (child_dev); } int snprintf_hv_guid(char *buf, size_t sz, const hv_guid *guid) { int cnt; const unsigned char *d = guid->data; cnt = snprintf(buf, sz, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", d[3], d[2], d[1], d[0], d[5], d[4], d[7], d[6], d[8], d[9], d[10],
d[11], d[12], d[13], d[14], d[15]); return (cnt); } int hv_vmbus_child_device_register(struct hv_device *child_dev) { device_t child; int ret = 0; if (bootverbose) { char name[40]; snprintf_hv_guid(name, sizeof(name), &child_dev->class_id); printf("VMBUS: Class ID: %s\n", name); } child = device_add_child(vmbus_devp, NULL, -1); child_dev->device = child; device_set_ivars(child, child_dev); mtx_lock(&Giant); ret = device_probe_and_attach(child); mtx_unlock(&Giant); return (0); } int hv_vmbus_child_device_unregister(struct hv_device *child_dev) { int ret = 0; /* * XXXKYS: Ensure that this is the opposite of * device_add_child() */ mtx_lock(&Giant); ret = device_delete_child(vmbus_devp, child_dev->device); mtx_unlock(&Giant); return(ret); } static int vmbus_probe(device_t dev) { if (ACPI_ID_PROBE(device_get_parent(dev), dev, vmbus_ids) == NULL || device_get_unit(dev) != 0) return (ENXIO); device_set_desc(dev, "Vmbus Devices"); return (BUS_PROBE_DEFAULT); } #ifdef HYPERV extern inthand_t IDTVEC(hv_vmbus_callback); #endif /** * @brief Main vmbus driver initialization routine. * * Here, we * - initialize the vmbus driver context * - setup various driver entry points * - invoke the vmbus hv main init routine * - get the irq resource * - invoke the vmbus to add the vmbus root device * - setup the vmbus root device * - retrieve the channel offers */ static int vmbus_bus_init(void) { int i, j, n, ret; char buf[MAXCOMLEN + 1]; cpuset_t cpu_mask; if (vmbus_inited) return (0); vmbus_inited = 1; ret = hv_vmbus_init(); if (ret) { if(bootverbose) printf("Error VMBUS: Hypervisor Initialization Failed!\n"); return (ret); } #ifdef HYPERV /* * Find a free IDT slot for vmbus callback. */ hv_vmbus_g_context.hv_cb_vector = lapic_ipi_alloc(IDTVEC(hv_vmbus_callback)); #else hv_vmbus_g_context.hv_cb_vector = -1; #endif if (hv_vmbus_g_context.hv_cb_vector < 0) { if(bootverbose) printf("Error VMBUS: Cannot find free IDT slot for " "vmbus callback!\n"); goto cleanup; } if(bootverbose) printf("VMBUS: vmbus callback vector %d\n", hv_vmbus_g_context.hv_cb_vector); /* * Notify the hypervisor of our vector. */ setup_args.vector = hv_vmbus_g_context.hv_cb_vector; CPU_FOREACH(j) { snprintf(buf, sizeof(buf), "cpu%d:hyperv", j); intrcnt_add(buf, &hv_vmbus_intr_cpu[j]); for (i = 0; i < 2; i++) setup_args.page_buffers[2 * j + i] = NULL; } /* * Per cpu setup. */ CPU_FOREACH(j) { /* * Setup taskqueue to handle events */ hv_vmbus_g_context.hv_event_queue[j] = taskqueue_create_fast("hyperv event", M_WAITOK, taskqueue_thread_enqueue, &hv_vmbus_g_context.hv_event_queue[j]); CPU_SETOF(j, &cpu_mask); taskqueue_start_threads_cpuset(&hv_vmbus_g_context.hv_event_queue[j], 1, PI_NET, &cpu_mask, "hvevent%d", j); /* * Setup per-cpu tasks and taskqueues to handle msg. */ hv_vmbus_g_context.hv_msg_tq[j] = taskqueue_create_fast( "hyperv msg", M_WAITOK, taskqueue_thread_enqueue, &hv_vmbus_g_context.hv_msg_tq[j]); CPU_SETOF(j, &cpu_mask); taskqueue_start_threads_cpuset(&hv_vmbus_g_context.hv_msg_tq[j], 1, PI_NET, &cpu_mask, "hvmsg%d", j); TASK_INIT(&hv_vmbus_g_context.hv_msg_task[j], 0, vmbus_msg_swintr, (void *)(long)j); /* * Prepare the per cpu msg and event pages to be called on each cpu. 
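The snprintf_hv_guid() formatter just above prints the first three GUID fields byte-reversed (d[3]..d[0], then d[5],d[4], then d[7],d[6]) because those fields are stored little-endian, while the trailing eight bytes print in storage order. A self-contained sketch of the same formatting, using a made-up sample GUID rather than a real class or device ID:

#include <stdio.h>

int main(void)
{
        // Hypothetical GUID bytes, stored in the mixed-endian layout.
        const unsigned char d[16] = {
                0x78, 0x56, 0x34, 0x12,  0xbc, 0x9a,  0xf0, 0xde,
                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef
        };
        char buf[40];

        // First three fields little-endian, last two big-endian.
        snprintf(buf, sizeof(buf),
            "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
            "%02x%02x%02x%02x%02x%02x",
            d[3], d[2], d[1], d[0], d[5], d[4], d[7], d[6],
            d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
        printf("%s\n", buf);    // prints 12345678-9abc-def0-0123-456789abcdef
        return (0);
}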
*/ for(i = 0; i < 2; i++) { setup_args.page_buffers[2 * j + i] = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); } } if (bootverbose) printf("VMBUS: Calling smp_rendezvous, smp_started = %d\n", smp_started); smp_rendezvous(NULL, hv_vmbus_synic_init, NULL, &setup_args); /* * Connect to VMBus in the root partition */ ret = hv_vmbus_connect(); if (ret != 0) goto cleanup1; hv_vmbus_request_channel_offers(); return (ret); cleanup1: /* * Free pages alloc'ed */ for (n = 0; n < 2 * MAXCPU; n++) if (setup_args.page_buffers[n] != NULL) free(setup_args.page_buffers[n], M_DEVBUF); /* * remove swi and vmbus callback vector; */ CPU_FOREACH(j) { if (hv_vmbus_g_context.hv_event_queue[j] != NULL) { taskqueue_free(hv_vmbus_g_context.hv_event_queue[j]); hv_vmbus_g_context.hv_event_queue[j] = NULL; } } lapic_ipi_free(hv_vmbus_g_context.hv_cb_vector); cleanup: hv_vmbus_cleanup(); return (ret); } static int vmbus_attach(device_t dev) { if(bootverbose) device_printf(dev, "VMBUS: attach dev: %p\n", dev); vmbus_devp = dev; /* * If the system has already booted and thread * scheduling is possible indicated by the global * cold set to zero, we just call the driver * initialization directly. */ if (!cold) vmbus_bus_init(); return (0); } static void vmbus_init(void) { if (vm_guest != VM_GUEST_HV) return; /* * If the system has already booted and thread * scheduling is possible, as indicated by the * global cold set to zero, we just call the driver * initialization directly. */ if (!cold) vmbus_bus_init(); } static void vmbus_bus_exit(void) { int i; hv_vmbus_release_unattached_channels(); hv_vmbus_disconnect(); smp_rendezvous(NULL, hv_vmbus_synic_cleanup, NULL, NULL); for(i = 0; i < 2 * MAXCPU; i++) { if (setup_args.page_buffers[i] != NULL) free(setup_args.page_buffers[i], M_DEVBUF); } hv_vmbus_cleanup(); /* remove swi */ CPU_FOREACH(i) { if (hv_vmbus_g_context.hv_event_queue[i] != NULL) { taskqueue_free(hv_vmbus_g_context.hv_event_queue[i]); hv_vmbus_g_context.hv_event_queue[i] = NULL; } } lapic_ipi_free(hv_vmbus_g_context.hv_cb_vector); return; } static void vmbus_exit(void) { vmbus_bus_exit(); } static int vmbus_detach(device_t dev) { vmbus_exit(); return (0); } static void vmbus_mod_load(void) { if(bootverbose) printf("VMBUS: load\n"); } static void vmbus_mod_unload(void) { if(bootverbose) printf("VMBUS: unload\n"); } static int vmbus_modevent(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: vmbus_mod_load(); break; case MOD_UNLOAD: vmbus_mod_unload(); break; } return (0); } static device_method_t vmbus_methods[] = { /** Device interface */ DEVMETHOD(device_probe, vmbus_probe), DEVMETHOD(device_attach, vmbus_attach), DEVMETHOD(device_detach, vmbus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /** Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, vmbus_read_ivar), DEVMETHOD(bus_write_ivar, vmbus_write_ivar), DEVMETHOD(bus_child_pnpinfo_str, vmbus_child_pnpinfo_str), { 0, 0 } }; static char driver_name[] = "vmbus"; static driver_t vmbus_driver = { driver_name, vmbus_methods,0, }; devclass_t vmbus_devclass; DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, vmbus_modevent, 0); MODULE_DEPEND(vmbus, acpi, 1, 1, 1); MODULE_VERSION(vmbus, 1); /* We want to be started after SMP is initialized */ SYSINIT(vmb_init, SI_SUB_SMP + 1, SI_ORDER_FIRST, vmbus_init, NULL); Index: 
projects/release-pkg/sys/dev/hyperv =================================================================== --- projects/release-pkg/sys/dev/hyperv (revision 297926) +++ projects/release-pkg/sys/dev/hyperv (revision 297927) Property changes on: projects/release-pkg/sys/dev/hyperv ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/dev/hyperv:r297861-297926 Index: projects/release-pkg/sys/dev/isp/isp.c =================================================================== --- projects/release-pkg/sys/dev/isp/isp.c (revision 297926) +++ projects/release-pkg/sys/dev/isp/isp.c (revision 297927) @@ -1,8519 +1,8541 @@ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Machine and OS Independent (well, as best as possible) * code for the Qlogic ISP SCSI and FC-SCSI adapters. */ /* * Inspiration and ideas about this driver are from Erik Moe's Linux driver * (qlogicisp.c) and Dave Miller's SBus version of same (qlogicisp.c). Some * ideas dredged from the Solaris driver. */ /* * Include header file appropriate for platform we're building on. 
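One idiom worth flagging before moving from the vmbus driver into the isp(4) changes: vmbus_bus_init() above unwinds a failed bring-up through staged labels (cleanup1 releases the per-CPU pages and taskqueues, then cleanup calls hv_vmbus_cleanup()), so each label frees only what was live before the failing step. The shape of that pattern, reduced to two hypothetical resources; none of these names are driver symbols:

#include <stdio.h>
#include <stdlib.h>

// Sketch: acquire in order, release in reverse from the failure point.
static int bring_up(void)
{
        char *pages = NULL, *queues = NULL;
        int connect_failed = 1;         // pretend the final step fails
        int error = 0;

        if ((pages = malloc(64)) == NULL)
                return (1);             // nothing to undo yet
        if ((queues = malloc(64)) == NULL) {
                error = 1;
                goto cleanup;           // undo 'pages' only
        }
        if (connect_failed) {
                error = 1;
                goto cleanup1;          // undo 'queues', then 'pages'
        }
        return (0);
cleanup1:
        free(queues);
cleanup:
        free(pages);
        return (error);
}

int main(void) { printf("bring_up: %d\n", bring_up()); return (0); }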
*/ #ifdef __NetBSD__ #include __KERNEL_RCSID(0, "$NetBSD$"); #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif /* * General defines */ #define MBOX_DELAY_COUNT 1000000 / 100 /* * Local static data */ static const char notresp[] = "Not RESPONSE in RESPONSE Queue (type 0x%x) @ idx %d (next %d) nlooked %d"; static const char bun[] = "bad underrun (count %d, resid %d, status %s)"; static const char lipd[] = "Chan %d LIP destroyed %d active commands"; static const char sacq[] = "unable to acquire scratch area"; static const uint8_t alpa_map[] = { 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9, 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97, 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17, 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01, 0x00 }; /* * Local function prototypes. */ static int isp_parse_async(ispsoftc_t *, uint16_t); static int isp_parse_async_fc(ispsoftc_t *, uint16_t); static int isp_handle_other_response(ispsoftc_t *, int, isphdr_t *, uint32_t *); static void isp_parse_status(ispsoftc_t *, ispstatusreq_t *, XS_T *, long *); static void isp_parse_status_24xx(ispsoftc_t *, isp24xx_statusreq_t *, XS_T *, long *); static void isp_fastpost_complete(ispsoftc_t *, uint32_t); static int isp_mbox_continue(ispsoftc_t *); static void isp_scsi_init(ispsoftc_t *); static void isp_scsi_channel_init(ispsoftc_t *, int); static void isp_fibre_init(ispsoftc_t *); static void isp_fibre_init_2400(ispsoftc_t *); static void isp_clear_portdb(ispsoftc_t *, int); static void isp_mark_portdb(ispsoftc_t *, int); static int isp_plogx(ispsoftc_t *, int, uint16_t, uint32_t, int); static int isp_port_login(ispsoftc_t *, uint16_t, uint32_t); static int isp_port_logout(ispsoftc_t *, uint16_t, uint32_t); static int isp_getpdb(ispsoftc_t *, int, uint16_t, isp_pdb_t *); static int isp_gethandles(ispsoftc_t *, int, uint16_t *, int *, int); static void isp_dump_chip_portdb(ispsoftc_t *, int); static uint64_t isp_get_wwn(ispsoftc_t *, int, int, int); static int isp_fclink_test(ispsoftc_t *, int, int); static int isp_pdb_sync(ispsoftc_t *, int); static int isp_scan_loop(ispsoftc_t *, int); static int isp_gid_ft_sns(ispsoftc_t *, int); static int isp_gid_ft_ct_passthru(ispsoftc_t *, int); static int isp_scan_fabric(ispsoftc_t *, int); static int isp_login_device(ispsoftc_t *, int, uint32_t, isp_pdb_t *, uint16_t *); static int isp_send_change_request(ispsoftc_t *, int); static int isp_register_fc4_type(ispsoftc_t *, int); static int isp_register_fc4_type_24xx(ispsoftc_t *, int); static int isp_register_fc4_features_24xx(ispsoftc_t *, int); static int isp_register_port_name_24xx(ispsoftc_t *, int); static int isp_register_node_name_24xx(ispsoftc_t *, int); static uint16_t isp_next_handle(ispsoftc_t *, uint16_t *); static int isp_fw_state(ispsoftc_t *, int); static void isp_mboxcmd_qnw(ispsoftc_t *, mbreg_t 
*, int); static void isp_mboxcmd(ispsoftc_t *, mbreg_t *); static void isp_spi_update(ispsoftc_t *, int); static void isp_setdfltsdparm(ispsoftc_t *); static void isp_setdfltfcparm(ispsoftc_t *, int); static int isp_read_nvram(ispsoftc_t *, int); static int isp_read_nvram_2400(ispsoftc_t *, uint8_t *); static void isp_rdnvram_word(ispsoftc_t *, int, uint16_t *); static void isp_rd_2400_nvram(ispsoftc_t *, uint32_t, uint32_t *); static void isp_parse_nvram_1020(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_1080(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_12160(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_2100(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_2400(ispsoftc_t *, uint8_t *); static void isp_change_fw_state(ispsoftc_t *isp, int chan, int state) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->isp_fwstate == state) return; isp_prt(isp, ISP_LOGCONFIG|ISP_LOG_SANCFG, "Chan %d Firmware state <%s->%s>", chan, isp_fc_fw_statename(fcp->isp_fwstate), isp_fc_fw_statename(state)); fcp->isp_fwstate = state; } /* * Reset Hardware. * * Hit the chip over the head, download new f/w if available and set it running. * * Locking done elsewhere. */ void isp_reset(ispsoftc_t *isp, int do_load_defaults) { mbreg_t mbs; char *buf; uint64_t fwt; uint32_t code_org, val; int loops, i, dodnld = 1; const char *btype = "????"; static const char dcrc[] = "Downloaded RISC Code Checksum Failure"; isp->isp_state = ISP_NILSTATE; if (isp->isp_dead) { isp_shutdown(isp); ISP_DISABLE_INTS(isp); return; } /* * Basic types (SCSI, FibreChannel and PCI or SBus) * have been set in the MD code. We figure out more * here. Possibly more refined types based upon PCI * identification. Chip revision has been gathered. * * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and do other settings for the 2100. */ ISP_DISABLE_INTS(isp); /* * Pick an initial maxcmds value which will be used * to allocate xflist pointer space. It may be changed * later by the firmware. */ if (IS_24XX(isp)) { isp->isp_maxcmds = 4096; } else if (IS_2322(isp)) { isp->isp_maxcmds = 2048; } else if (IS_23XX(isp) || IS_2200(isp)) { isp->isp_maxcmds = 1024; } else { isp->isp_maxcmds = 512; } /* * Set up DMA for the request and response queues. * * We do this now so we can use the request queue * for dma to load firmware from. */ if (ISP_MBOXDMASETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup DMA"); return; } /* * Set up default request/response queue in-pointer/out-pointer * register indices. */ if (IS_24XX(isp)) { isp->isp_rqstinrp = BIU2400_REQINP; isp->isp_rqstoutrp = BIU2400_REQOUTP; isp->isp_respinrp = BIU2400_RSPINP; isp->isp_respoutrp = BIU2400_RSPOUTP; } else if (IS_23XX(isp)) { isp->isp_rqstinrp = BIU_REQINP; isp->isp_rqstoutrp = BIU_REQOUTP; isp->isp_respinrp = BIU_RSPINP; isp->isp_respoutrp = BIU_RSPOUTP; } else { isp->isp_rqstinrp = INMAILBOX4; isp->isp_rqstoutrp = OUTMAILBOX4; isp->isp_respinrp = OUTMAILBOX5; isp->isp_respoutrp = INMAILBOX5; } /* * Put the board into PAUSE mode (so we can read the SXP registers * or write FPM/FBM registers). 
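isp_reset() above resolves the request/response queue in/out pointer registers once, by chip generation (BIU2400_* on 24XX, BIU_* on 23XX, mailbox registers otherwise), and caches the indices in the softc for everything that follows. A sketch of that select-once idiom; the offsets and names below are invented for illustration, not the real register map:

#include <stdio.h>
#include <stdint.h>

// Hypothetical queue-pointer register offsets for three generations.
struct qregs { uint16_t reqin, reqout, rspin, rspout; };

static const struct qregs qregs_24xx = { 0x1c, 0x20, 0x24, 0x28 };
static const struct qregs qregs_23xx = { 0x10, 0x12, 0x14, 0x16 };
static const struct qregs qregs_mbox = { 0x88, 0x90, 0x92, 0x8a };

enum gen { GEN_OLD, GEN_23XX, GEN_24XX };

int main(void)
{
        enum gen g = GEN_23XX;
        const struct qregs *q;

        // Resolve the indices once at reset time...
        q = (g == GEN_24XX) ? &qregs_24xx :
            (g == GEN_23XX) ? &qregs_23xx : &qregs_mbox;
        // ...and use the cached values everywhere afterwards.
        printf("reqin at 0x%02x, rspout at 0x%02x\n", q->reqin, q->rspout);
        return (0);
}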
*/ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_HOST_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } if (IS_FC(isp)) { switch (isp->isp_type) { case ISP_HA_FC_2100: btype = "2100"; break; case ISP_HA_FC_2200: btype = "2200"; break; case ISP_HA_FC_2300: btype = "2300"; break; case ISP_HA_FC_2312: btype = "2312"; break; case ISP_HA_FC_2322: btype = "2322"; break; case ISP_HA_FC_2400: btype = "2422"; break; case ISP_HA_FC_2500: btype = "2532"; break; case ISP_HA_FC_2600: btype = "2031"; break; default: break; } if (!IS_24XX(isp)) { /* * While we're paused, reset the FPM module and FBM * fifos. */ ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else if (IS_1240(isp)) { sdparam *sdp; btype = "1240"; isp->isp_clock = 60; sdp = SDPARAM(isp, 0); sdp->isp_ultramode = 1; sdp = SDPARAM(isp, 1); sdp->isp_ultramode = 1; /* * XXX: Should probably do some bus sensing. */ } else if (IS_ULTRA3(isp)) { sdparam *sdp = isp->isp_param; isp->isp_clock = 100; if (IS_10160(isp)) btype = "10160"; else if (IS_12160(isp)) btype = "12160"; else btype = ""; sdp->isp_lvdmode = 1; if (IS_DUALBUS(isp)) { sdp++; sdp->isp_lvdmode = 1; } } else if (IS_ULTRA2(isp)) { static const char m[] = "bus %d is in %s Mode"; uint16_t l; sdparam *sdp = SDPARAM(isp, 0); isp->isp_clock = 100; if (IS_1280(isp)) btype = "1280"; else if (IS_1080(isp)) btype = "1080"; else btype = ""; l = ISP_READ(isp, SXP_PINS_DIFF) & ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 0, l); break; } if (IS_DUALBUS(isp)) { sdp = SDPARAM(isp, 1); l = ISP_READ(isp, SXP_PINS_DIFF|SXP_BANK1_SELECT); l &= ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 1, l); break; } } } else { sdparam *sdp = SDPARAM(isp, 0); i = ISP_READ(isp, BIU_CONF0) & BIU_CONF0_HW_MASK; switch (i) { default: isp_prt(isp, ISP_LOGALL, "Unknown Chip Type 0x%x", i); /* FALLTHROUGH */ case 1: btype = "1020"; isp->isp_type = ISP_HA_SCSI_1020; isp->isp_clock = 40; break; case 2: /* * Some 1020A chips are Ultra Capable, but don't * run the clock rate up for that unless told to * do so by the Ultra Capable bits being set. 
*/ btype = "1020A"; isp->isp_type = ISP_HA_SCSI_1020A; isp->isp_clock = 40; break; case 3: btype = "1040"; isp->isp_type = ISP_HA_SCSI_1040; isp->isp_clock = 60; break; case 4: btype = "1040A"; isp->isp_type = ISP_HA_SCSI_1040A; isp->isp_clock = 60; break; case 5: btype = "1040B"; isp->isp_type = ISP_HA_SCSI_1040B; isp->isp_clock = 60; break; case 6: btype = "1040C"; isp->isp_type = ISP_HA_SCSI_1040C; isp->isp_clock = 60; break; } /* * Now, while we're at it, gather info about ultra * and/or differential mode. */ if (ISP_READ(isp, SXP_PINS_DIFF) & SXP_PINS_DIFF_MODE) { isp_prt(isp, ISP_LOGCONFIG, "Differential Mode"); sdp->isp_diffmode = 1; } else { sdp->isp_diffmode = 0; } i = ISP_READ(isp, RISC_PSR); if (isp->isp_bustype == ISP_BT_SBUS) { i &= RISC_PSR_SBUS_ULTRA; } else { i &= RISC_PSR_PCI_ULTRA; } if (i != 0) { isp_prt(isp, ISP_LOGCONFIG, "Ultra Mode Capable"); sdp->isp_ultramode = 1; /* * If we're in Ultra Mode, we have to be 60MHz clock- * even for the SBus version. */ isp->isp_clock = 60; } else { sdp->isp_ultramode = 0; /* * Clock is known. Gronk. */ } /* * Machine dependent clock (if set) overrides * our generic determinations. */ if (isp->isp_mdvec->dv_clock) { if (isp->isp_mdvec->dv_clock < isp->isp_clock) { isp->isp_clock = isp->isp_mdvec->dv_clock; } } } /* * Clear instrumentation */ isp->isp_intcnt = isp->isp_intbogus = 0; /* * Do MD specific pre initialization */ ISP_RESET0(isp); /* * Hit the chip over the head with hammer, * and give it a chance to recover. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_ICR, BIU_ICR_SOFT_RESET); /* * A slight delay... */ ISP_DELAY(100); /* * Clear data && control DMA engines. */ ISP_WRITE(isp, CDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); ISP_WRITE(isp, DDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); } else if (IS_24XX(isp)) { /* * Stop DMA and wait for it to stop. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_DMA_STOP|(3 << 4)); for (val = loops = 0; loops < 30000; loops++) { ISP_DELAY(10); val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_DMA_ACTIVE) == 0) { break; } } if (val & BIU2400_DMA_ACTIVE) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "DMA Failed to Stop on Reset"); return; } /* * Hold it in SOFT_RESET and STOP state for 100us. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_SOFT_RESET|BIU2400_DMA_STOP|(3 << 4)); ISP_DELAY(100); for (loops = 0; loops < 10000; loops++) { ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); } for (val = loops = 0; loops < 500000; loops ++) { val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_SOFT_RESET) == 0) { break; } } if (val & BIU2400_SOFT_RESET) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "Failed to come out of reset"); return; } } else { ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); /* * A slight delay... */ ISP_DELAY(100); /* * Clear data && control DMA engines. */ ISP_WRITE(isp, CDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, TDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, RDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); } /* * Wait for ISP to be ready to go... 
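Every wait in the reset sequence above is bounded: a short delay per iteration, a loop cap, and a hard error if the condition never comes true, so a wedged chip cannot hang the kernel in an infinite spin. The skeleton of those loops, with the register read stubbed out (the names and counts here are illustrative only):

#include <stdio.h>

#define DMA_ACTIVE  0x01

// Stub: pretend the bit clears after a few polls.
static int reg_read(void) { static int n; return (++n < 5 ? DMA_ACTIVE : 0); }
static void short_delay(void) { /* stands in for ISP_DELAY(10) */ }

int main(void)
{
        int val = DMA_ACTIVE, loops;

        for (loops = 0; loops < 30000; loops++) {
                short_delay();
                val = reg_read();
                if ((val & DMA_ACTIVE) == 0)
                        break;
        }
        if (val & DMA_ACTIVE) {
                printf("DMA failed to stop\n");     // hard error path
                return (1);
        }
        printf("stopped after %d polls\n", loops);
        return (0);
}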
*/ loops = MBOX_DELAY_COUNT; for (;;) { if (IS_SCSI(isp)) { if (!(ISP_READ(isp, BIU_ICR) & BIU_ICR_SOFT_RESET)) { break; } } else if (IS_24XX(isp)) { if (ISP_READ(isp, OUTMAILBOX0) == 0) { break; } } else { if (!(ISP_READ(isp, BIU2100_CSR) & BIU2100_SOFT_RESET)) break; } ISP_DELAY(100); if (--loops < 0) { ISP_DUMPREGS(isp, "chip reset timed out"); ISP_RESET0(isp); return; } } /* * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and other settings for the 2100. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_CONF1, 0); } else if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, 0); } /* * Reset RISC Processor */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RELEASE); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RESET); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); ISP_DELAY(100); ISP_WRITE(isp, BIU_SEMA, 0); } /* * Post-RISC Reset stuff. */ if (IS_24XX(isp)) { for (val = loops = 0; loops < 5000000; loops++) { ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); if (val == 0) { break; } } if (val != 0) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "reset didn't clear"); return; } } else if (IS_SCSI(isp)) { uint16_t tmp = isp->isp_mdvec->dv_conf1; /* * Busted FIFO. Turn off all but burst enables. */ if (isp->isp_type == ISP_HA_SCSI_1040A) { tmp &= BIU_BURST_ENABLE; } ISP_SETBITS(isp, BIU_CONF1, tmp); if (tmp & BIU_BURST_ENABLE) { ISP_SETBITS(isp, CDMA_CONF, DMA_ENABLE_BURST); ISP_SETBITS(isp, DDMA_CONF, DMA_ENABLE_BURST); } if (SDPARAM(isp, 0)->isp_ptisp) { if (SDPARAM(isp, 0)->isp_ultramode) { while (ISP_READ(isp, RISC_MTR) != 0x1313) { ISP_WRITE(isp, RISC_MTR, 0x1313); ISP_WRITE(isp, HCCR, HCCR_CMD_STEP); } } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } /* * PTI specific register */ ISP_WRITE(isp, RISC_EMB, DUAL_BANK); } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } else { ISP_WRITE(isp, RISC_MTR2100, 0x1212); if (IS_2200(isp) || IS_23XX(isp)) { ISP_WRITE(isp, HCCR, HCCR_2X00_DISABLE_PARITY_PAUSE); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } ISP_WRITE(isp, isp->isp_rqstinrp, 0); ISP_WRITE(isp, isp->isp_rqstoutrp, 0); ISP_WRITE(isp, isp->isp_respinrp, 0); ISP_WRITE(isp, isp->isp_respoutrp, 0); if (IS_24XX(isp)) { if (!IS_26XX(isp)) { ISP_WRITE(isp, BIU2400_PRI_REQINP, 0); ISP_WRITE(isp, BIU2400_PRI_REQOUTP, 0); } ISP_WRITE(isp, BIU2400_ATIO_RSPINP, 0); ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, 0); } /* * Do MD specific post initialization */ ISP_RESET1(isp); /* * Wait for everything to finish firing up. * * Avoid doing this on early 2312s because you can generate a PCI * parity error (chip breakage). */ if (IS_2312(isp) && isp->isp_revision < 2) { ISP_DELAY(100); } else { loops = MBOX_DELAY_COUNT; while (ISP_READ(isp, OUTMAILBOX0) == MBOX_BUSY) { ISP_DELAY(100); if (--loops < 0) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "MBOX_BUSY never cleared on reset"); return; } } } /* * Up until this point we've done everything by just reading or * setting registers. From this point on we rely on at least *some* * kind of firmware running in the card. */ /* * Do some sanity checking by running a NOP command. * If it succeeds, the ROM firmware is now running. 
*/ MBSINIT(&mbs, MBOX_NO_OP, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "NOP command failed (%x)", mbs.param[0]); ISP_RESET0(isp); return; } /* * Do some operational tests */ if (IS_SCSI(isp) || IS_24XX(isp)) { static const uint16_t patterns[MAX_MAILBOX] = { 0x0000, 0xdead, 0xbeef, 0xffff, 0xa5a5, 0x5a5a, 0x7f7f, 0x7ff7, 0x3421, 0xabcd, 0xdcba, 0xfeef, 0xbead, 0xdebe, 0x2222, 0x3333, 0x5555, 0x6666, 0x7777, 0xaaaa, 0xffff, 0xdddd, 0x9999, 0x1fbc, 0x6666, 0x6677, 0x1122, 0x33ff, 0x0000, 0x0001, 0x1000, 0x1010, }; int nmbox = ISP_NMBOX(isp); if (IS_SCSI(isp)) nmbox = 6; MBSINIT(&mbs, MBOX_MAILBOX_REG_TEST, MBLOGALL, 0); for (i = 1; i < nmbox; i++) { mbs.param[i] = patterns[i]; } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } for (i = 1; i < nmbox; i++) { if (mbs.param[i] != patterns[i]) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "Register Test Failed at Register %d: should have 0x%04x but got 0x%04x", i, patterns[i], mbs.param[i]); return; } } } /* * Download new Firmware, unless requested not to do so. * This is made slightly trickier in some cases where the * firmware of the ROM revision is newer than the revision * compiled into the driver. So, where we used to compare * versions of our f/w and the ROM f/w, now we just see * whether we have f/w at all and whether a config flag * has disabled our download. */ if ((isp->isp_mdvec->dv_ispfw == NULL) || (isp->isp_confopts & ISP_CFG_NORELOAD)) { dodnld = 0; } if (IS_24XX(isp)) { code_org = ISP_CODE_ORG_2400; } else if (IS_23XX(isp)) { code_org = ISP_CODE_ORG_2300; } else { code_org = ISP_CODE_ORG; } isp->isp_loaded_fw = 0; if (dodnld && IS_24XX(isp)) { const uint32_t *ptr = isp->isp_mdvec->dv_ispfw; int wordload; /* * Keep loading until we run out of f/w. 
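The MBOX_MAILBOX_REG_TEST exchange above writes a known pattern through every mailbox register and insists the firmware echo each value back, catching dead register paths before any real command is trusted. The verify side of that exchange, sketched with the echo stubbed (register 0 is skipped because it carries the command/status word):

#include <stdio.h>
#include <stdint.h>

#define NMBOX 6

int main(void)
{
        static const uint16_t patterns[NMBOX] =
            { 0x0000, 0xdead, 0xbeef, 0xffff, 0xa5a5, 0x5a5a };
        uint16_t echoed[NMBOX];
        int i;

        // Stub: a healthy device echoes every pattern unchanged.
        for (i = 0; i < NMBOX; i++)
                echoed[i] = patterns[i];

        for (i = 1; i < NMBOX; i++) {
                if (echoed[i] != patterns[i]) {
                        printf("register %d: wanted 0x%04x got 0x%04x\n",
                            i, patterns[i], echoed[i]);
                        return (1);
                }
        }
        printf("mailbox register test passed\n");
        return (0);
}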
*/ code_org = ptr[2]; /* 1st load address is our start addr */ wordload = 0; for (;;) { uint32_t la, wi, wl; isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], ptr[2]); wi = 0; la = ptr[2]; wl = ptr[3]; while (wi < ptr[3]) { uint32_t *cp; uint32_t nw; nw = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) >> 2; if (nw > wl) { nw = wl; } cp = isp->isp_rquest; for (i = 0; i < nw; i++) { ISP_IOXPUT_32(isp, ptr[wi++], &cp[i]); wl--; } MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1); again: MBSINIT(&mbs, 0, MBLOGALL, 0); if (la < 0x10000 && nw < 0x10000) { mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM 2100 %u words at load address 0x%x", nw, la); } else if (wordload) { union { const uint32_t *cp; uint32_t *np; } ucd; ucd.cp = (const uint32_t *)cp; mbs.param[0] = MBOX_WRITE_RAM_WORD_EXTENDED; mbs.param[1] = la; mbs.param[2] = (*ucd.np); mbs.param[3] = (*ucd.np) >> 16; mbs.param[8] = la >> 16; isp->isp_mbxwrk0 = nw - 1; isp->isp_mbxworkp = ucd.np+1; isp->isp_mbxwrk1 = (la + 1); isp->isp_mbxwrk8 = (la + 1) >> 16; isp_prt(isp, ISP_LOGDEBUG0, "WRITE RAM WORD EXTENDED %u words at load address 0x%x", nw, la); } else { mbs.param[0] = MBOX_LOAD_RISC_RAM; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw >> 16; mbs.param[5] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); mbs.param[8] = la >> 16; isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM %u words at load address 0x%x", nw, la); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (mbs.param[0] == MBOX_HOST_INTERFACE_ERROR) { isp_prt(isp, ISP_LOGERR, "switching to word load"); wordload = 1; goto again; } isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed"); ISP_RESET0(isp); return; } la += nw; } if (ptr[1] == 0) { break; } ptr += ptr[3]; } isp->isp_loaded_fw = 1; } else if (dodnld && IS_23XX(isp)) { const uint16_t *ptr = isp->isp_mdvec->dv_ispfw; uint16_t wi, wl, segno; uint32_t la; la = code_org; segno = 0; for (;;) { uint32_t nxtaddr; isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], la); wi = 0; wl = ptr[3]; while (wi < ptr[3]) { uint16_t *cp; uint16_t nw; nw = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) >> 1; if (nw > wl) { nw = wl; } if (nw > (1 << 15)) { nw = 1 << 15; } cp = isp->isp_rquest; for (i = 0; i < nw; i++) { ISP_IOXPUT_16(isp, ptr[wi++], &cp[i]); wl--; } MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1); MBSINIT(&mbs, 0, MBLOGALL, 0); if (la < 0x10000) { mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM 2100 %u words at load address 0x%x\n", nw, la); } else { mbs.param[0] = MBOX_LOAD_RISC_RAM; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); mbs.param[8] = la >> 16; isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM %u words at load 
address 0x%x\n", nw, la); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed"); ISP_RESET0(isp); return; } la += nw; } if (!IS_2322(isp)) { break; } if (++segno == 3) { break; } /* * If we're a 2322, the firmware actually comes in * three chunks. We loaded the first at the code_org * address. The other two chunks, which follow right * after each other in memory here, get loaded at * addresses specfied at offset 0x9..0xB. */ nxtaddr = ptr[3]; ptr = &ptr[nxtaddr]; la = ptr[5] | ((ptr[4] & 0x3f) << 16); } isp->isp_loaded_fw = 1; } else if (dodnld) { union { const uint16_t *cp; uint16_t *np; } ucd; ucd.cp = isp->isp_mdvec->dv_ispfw; isp->isp_mbxworkp = &ucd.np[1]; isp->isp_mbxwrk0 = ucd.np[3] - 1; isp->isp_mbxwrk1 = code_org + 1; MBSINIT(&mbs, MBOX_WRITE_RAM_WORD, MBLOGNONE, 0); mbs.param[1] = code_org; mbs.param[2] = ucd.np[0]; isp_prt(isp, ISP_LOGDEBUG1, "WRITE RAM %u words at load address 0x%x", ucd.np[3], code_org); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W download failed at word %d", isp->isp_mbxwrk1 - code_org); ISP_RESET0(isp); return; } } else if (IS_26XX(isp)) { MBSINIT(&mbs, MBOX_LOAD_FLASH_FIRMWARE, MBLOGALL, 5000000); mbs.ibitm = 0x01; mbs.obitm = 0x07; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "Flash F/W load failed"); ISP_RESET0(isp); return; } } else { isp_prt(isp, ISP_LOGDEBUG2, "skipping f/w download"); } /* * If we loaded firmware, verify its checksum */ if (isp->isp_loaded_fw) { MBSINIT(&mbs, MBOX_VERIFY_CHECKSUM, MBLOGNONE, 0); if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; } else { mbs.param[1] = code_org; } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, dcrc); ISP_RESET0(isp); return; } } /* * Now start it rolling. * * If we didn't actually download f/w, * we still need to (re)start it. */ MBSINIT(&mbs, MBOX_EXEC_FIRMWARE, MBLOGALL, 5000000); if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; if (isp->isp_loaded_fw) { mbs.param[3] = 0; } else { mbs.param[3] = 1; } } else if (IS_2322(isp)) { mbs.param[1] = code_org; if (isp->isp_loaded_fw) { mbs.param[2] = 0; } else { mbs.param[2] = 1; } } else { mbs.param[1] = code_org; } isp_mboxcmd(isp, &mbs); if (IS_2322(isp) || IS_24XX(isp)) { if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } } if (IS_SCSI(isp)) { /* * Set CLOCK RATE, but only if asked to. */ if (isp->isp_clock) { MBSINIT(&mbs, MBOX_SET_CLOCK_RATE, MBLOGALL, 0); mbs.param[1] = isp->isp_clock; isp_mboxcmd(isp, &mbs); /* we will try not to care if this fails */ } } /* * Ask the chip for the current firmware version. * This should prove that the new firmware is working. */ MBSINIT(&mbs, MBOX_ABOUT_FIRMWARE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } /* * The SBus firmware that we are using apparently does not return * major, minor, micro revisions in the mailbox registers, which * is really, really, annoying. 
*/ if (ISP_SBUS_SUPPORTED && isp->isp_bustype == ISP_BT_SBUS) { if (dodnld) { #ifdef ISP_TARGET_MODE isp->isp_fwrev[0] = 7; isp->isp_fwrev[1] = 55; #else isp->isp_fwrev[0] = 1; isp->isp_fwrev[1] = 37; #endif isp->isp_fwrev[2] = 0; } } else { isp->isp_fwrev[0] = mbs.param[1]; isp->isp_fwrev[1] = mbs.param[2]; isp->isp_fwrev[2] = mbs.param[3]; } if (IS_FC(isp)) { /* * We do not believe firmware attributes for 2100 code less * than 1.17.0, unless it's the firmware we specifically * are loading. * * Note that all 22XX and later f/w is greater than 1.X.0. */ if ((ISP_FW_OLDER_THAN(isp, 1, 17, 1))) { #ifdef USE_SMALLER_2100_FIRMWARE isp->isp_fwattr = ISP_FW_ATTR_SCCLUN; #else isp->isp_fwattr = 0; #endif } else { isp->isp_fwattr = mbs.param[6]; } if (IS_24XX(isp)) { isp->isp_fwattr |= ((uint64_t) mbs.param[15]) << 16; if (isp->isp_fwattr & ISP2400_FW_ATTR_EXTNDED) { isp->isp_fwattr |= (((uint64_t) mbs.param[16]) << 32) | (((uint64_t) mbs.param[17]) << 48); } } } else { isp->isp_fwattr = 0; } isp_prt(isp, ISP_LOGCONFIG, "Board Type %s, Chip Revision 0x%x, %s F/W Revision %d.%d.%d", btype, isp->isp_revision, dodnld? "loaded" : "resident", isp->isp_fwrev[0], isp->isp_fwrev[1], isp->isp_fwrev[2]); fwt = isp->isp_fwattr; if (IS_24XX(isp)) { buf = FCPARAM(isp, 0)->isp_scratch; ISP_SNPRINTF(buf, ISP_FC_SCRLEN, "Attributes:"); if (fwt & ISP2400_FW_ATTR_CLASS2) { fwt ^=ISP2400_FW_ATTR_CLASS2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Class2", buf); } if (fwt & ISP2400_FW_ATTR_IP) { fwt ^=ISP2400_FW_ATTR_IP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s IP", buf); } if (fwt & ISP2400_FW_ATTR_MULTIID) { fwt ^=ISP2400_FW_ATTR_MULTIID; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MultiID", buf); } if (fwt & ISP2400_FW_ATTR_SB2) { fwt ^=ISP2400_FW_ATTR_SB2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SB2", buf); } if (fwt & ISP2400_FW_ATTR_T10CRC) { fwt ^=ISP2400_FW_ATTR_T10CRC; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s T10CRC", buf); } if (fwt & ISP2400_FW_ATTR_VI) { fwt ^=ISP2400_FW_ATTR_VI; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI", buf); } if (fwt & ISP2400_FW_ATTR_MQ) { fwt ^=ISP2400_FW_ATTR_MQ; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MQ", buf); } if (fwt & ISP2400_FW_ATTR_MSIX) { fwt ^=ISP2400_FW_ATTR_MSIX; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MSIX", buf); } if (fwt & ISP2400_FW_ATTR_FCOE) { fwt ^=ISP2400_FW_ATTR_FCOE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s FCOE", buf); } if (fwt & ISP2400_FW_ATTR_VP0) { fwt ^= ISP2400_FW_ATTR_VP0; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VP0_Decoupling", buf); } if (fwt & ISP2400_FW_ATTR_EXPFW) { fwt ^= ISP2400_FW_ATTR_EXPFW; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (Experimental)", buf); } if (fwt & ISP2400_FW_ATTR_HOTFW) { fwt ^= ISP2400_FW_ATTR_HOTFW; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s HotFW", buf); } fwt &= ~ISP2400_FW_ATTR_EXTNDED; if (fwt & ISP2400_FW_ATTR_EXTVP) { fwt ^= ISP2400_FW_ATTR_EXTVP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ExtVP", buf); } if (fwt & ISP2400_FW_ATTR_VN2VN) { fwt ^= ISP2400_FW_ATTR_VN2VN; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VN2VN", buf); } if (fwt & ISP2400_FW_ATTR_EXMOFF) { fwt ^= ISP2400_FW_ATTR_EXMOFF; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s EXMOFF", buf); } if (fwt & ISP2400_FW_ATTR_NPMOFF) { fwt ^= ISP2400_FW_ATTR_NPMOFF; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s NPMOFF", buf); } if (fwt & ISP2400_FW_ATTR_DIFCHOP) { fwt ^= ISP2400_FW_ATTR_DIFCHOP; ISP_SNPRINTF(buf, 
ISP_FC_SCRLEN - strlen(buf), "%s DIFCHOP", buf); } if (fwt & ISP2400_FW_ATTR_SRIOV) { fwt ^= ISP2400_FW_ATTR_SRIOV; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SRIOV", buf); } if (fwt & ISP2400_FW_ATTR_ASICTMP) { fwt ^= ISP2400_FW_ATTR_ASICTMP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ASICTMP", buf); } if (fwt & ISP2400_FW_ATTR_ATIOMQ) { fwt ^= ISP2400_FW_ATTR_ATIOMQ; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ATIOMQ", buf); } if (fwt) { ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (unknown 0x%08x%08x)", buf, (uint32_t) (fwt >> 32), (uint32_t) fwt); } isp_prt(isp, ISP_LOGCONFIG, "%s", buf); } else if (IS_FC(isp)) { buf = FCPARAM(isp, 0)->isp_scratch; ISP_SNPRINTF(buf, ISP_FC_SCRLEN, "Attributes:"); if (fwt & ISP_FW_ATTR_TMODE) { fwt ^=ISP_FW_ATTR_TMODE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s TargetMode", buf); } if (fwt & ISP_FW_ATTR_SCCLUN) { fwt ^=ISP_FW_ATTR_SCCLUN; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SCC-Lun", buf); } if (fwt & ISP_FW_ATTR_FABRIC) { fwt ^=ISP_FW_ATTR_FABRIC; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Fabric", buf); } if (fwt & ISP_FW_ATTR_CLASS2) { fwt ^=ISP_FW_ATTR_CLASS2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Class2", buf); } if (fwt & ISP_FW_ATTR_FCTAPE) { fwt ^=ISP_FW_ATTR_FCTAPE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s FC-Tape", buf); } if (fwt & ISP_FW_ATTR_IP) { fwt ^=ISP_FW_ATTR_IP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s IP", buf); } if (fwt & ISP_FW_ATTR_VI) { fwt ^=ISP_FW_ATTR_VI; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI", buf); } if (fwt & ISP_FW_ATTR_VI_SOLARIS) { fwt ^=ISP_FW_ATTR_VI_SOLARIS; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI_SOLARIS", buf); } if (fwt & ISP_FW_ATTR_2KLOGINS) { fwt ^=ISP_FW_ATTR_2KLOGINS; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s 2K-Login", buf); } if (fwt != 0) { ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (unknown 0x%08x%08x)", buf, (uint32_t) (fwt >> 32), (uint32_t) fwt); } isp_prt(isp, ISP_LOGCONFIG, "%s", buf); } if (IS_24XX(isp)) { MBSINIT(&mbs, MBOX_GET_RESOURCE_COUNT, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } if (isp->isp_maxcmds >= mbs.param[3]) { isp->isp_maxcmds = mbs.param[3]; } } else { MBSINIT(&mbs, MBOX_GET_FIRMWARE_STATUS, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } if (isp->isp_maxcmds >= mbs.param[2]) { isp->isp_maxcmds = mbs.param[2]; } } isp_prt(isp, ISP_LOGCONFIG, "%d max I/O command limit set", isp->isp_maxcmds); /* * If we don't have Multi-ID f/w loaded, we need to restrict channels to one. * Only make this check for non-SCSI cards (I'm not sure firmware attributes * work for them). */ if (IS_FC(isp) && isp->isp_nchan > 1) { if (!ISP_CAP_MULTI_ID(isp)) { isp_prt(isp, ISP_LOGWARN, "non-MULTIID f/w loaded, " "only can enable 1 of %d channels", isp->isp_nchan); isp->isp_nchan = 1; } else if (!ISP_CAP_VP0(isp)) { isp_prt(isp, ISP_LOGWARN, "We can not use MULTIID " "feature properly without VP0_Decoupling"); isp->isp_nchan = 1; } } if (IS_FC(isp)) { for (i = 0; i < isp->isp_nchan; i++) isp_change_fw_state(isp, i, FW_CONFIG_WAIT); } if (isp->isp_dead) { isp_shutdown(isp); ISP_DISABLE_INTS(isp); return; } isp->isp_state = ISP_RESETSTATE; /* * Okay- now that we have new firmware running, we now (re)set our * notion of how many luns we support. 
This is somewhat tricky because * if we haven't loaded firmware, we sometimes do not have an easy way * of knowing how many luns we support. * * Expanded lun firmware gives you 32 luns for SCSI cards and * 16384 luns for Fibre Channel cards. * * It turns out that even for QLogic 2100s with ROM 1.10 and above * we do get a firmware attributes word returned in mailbox register 6. * * Because the lun is in a different position in the Request Queue * Entry structure for Fibre Channel with expanded lun firmware, we * can only support one lun (lun zero) when we don't know what kind * of firmware we're running. */ if (IS_SCSI(isp)) { if (dodnld) { if (IS_ULTRA2(isp) || IS_ULTRA3(isp)) { isp->isp_maxluns = 32; } else { isp->isp_maxluns = 8; } } else { isp->isp_maxluns = 8; } } else { if (ISP_CAP_SCCFW(isp)) { isp->isp_maxluns = 0; /* No limit -- 2/8 bytes */ } else { isp->isp_maxluns = 16; } } /* * We get some default values established. As a side * effect, NVRAM is read here (unless overridden by * a configuration flag). */ if (do_load_defaults) { if (IS_SCSI(isp)) { isp_setdfltsdparm(isp); } else { for (i = 0; i < isp->isp_nchan; i++) { isp_setdfltfcparm(isp, i); } } } } /* * Clean firmware shutdown. */ static int isp_deinit(ispsoftc_t *isp) { mbreg_t mbs; isp->isp_state = ISP_NILSTATE; MBSINIT(&mbs, MBOX_STOP_FIRMWARE, MBLOGALL, 500000); mbs.param[1] = 0; mbs.param[2] = 0; mbs.param[3] = 0; mbs.param[4] = 0; mbs.param[5] = 0; mbs.param[6] = 0; mbs.param[7] = 0; mbs.param[8] = 0; isp_mboxcmd(isp, &mbs); return (mbs.param[0] == MBOX_COMMAND_COMPLETE ? 0 : mbs.param[0]); } /* * Initialize Parameters of Hardware to a known state. * * Locks are held before coming here. */ void isp_init(ispsoftc_t *isp) { if (IS_FC(isp)) { if (IS_24XX(isp)) { isp_fibre_init_2400(isp); } else { isp_fibre_init(isp); } } else { isp_scsi_init(isp); } GET_NANOTIME(&isp->isp_init_time); } static void isp_scsi_init(ispsoftc_t *isp) { sdparam *sdp_chan0, *sdp_chan1; mbreg_t mbs; isp->isp_state = ISP_INITSTATE; sdp_chan0 = SDPARAM(isp, 0); sdp_chan1 = sdp_chan0; if (IS_DUALBUS(isp)) { sdp_chan1 = SDPARAM(isp, 1); } /* First do overall per-card settings. */ /* * If we have fast memory timing enabled, turn it on. */ if (sdp_chan0->isp_fast_mttr) { ISP_WRITE(isp, RISC_MTR, 0x1313); } /* * Set Retry Delay and Count. * You set both channels at the same time. */ MBSINIT(&mbs, MBOX_SET_RETRY_COUNT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_retry_count; mbs.param[2] = sdp_chan0->isp_retry_delay; mbs.param[6] = sdp_chan1->isp_retry_count; mbs.param[7] = sdp_chan1->isp_retry_delay; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ASYNC DATA SETUP time. This is very important. */ MBSINIT(&mbs, MBOX_SET_ASYNC_DATA_SETUP_TIME, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_async_data_setup; mbs.param[2] = sdp_chan1->isp_async_data_setup; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ACTIVE Negation State.
*/ MBSINIT(&mbs, MBOX_SET_ACT_NEG_STATE, MBLOGNONE, 0); mbs.param[1] = (sdp_chan0->isp_req_ack_active_neg << 4) | (sdp_chan0->isp_data_line_active_neg << 5); mbs.param[2] = (sdp_chan1->isp_req_ack_active_neg << 4) | (sdp_chan1->isp_data_line_active_neg << 5); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set active negation state (%d,%d), (%d,%d)", sdp_chan0->isp_req_ack_active_neg, sdp_chan0->isp_data_line_active_neg, sdp_chan1->isp_req_ack_active_neg, sdp_chan1->isp_data_line_active_neg); /* * But don't return. */ } /* * Set the Tag Aging limit */ MBSINIT(&mbs, MBOX_SET_TAG_AGE_LIMIT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_tag_aging; mbs.param[2] = sdp_chan1->isp_tag_aging; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set tag age limit (%d,%d)", sdp_chan0->isp_tag_aging, sdp_chan1->isp_tag_aging); return; } /* * Set selection timeout. */ MBSINIT(&mbs, MBOX_SET_SELECT_TIMEOUT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_selection_timeout; mbs.param[2] = sdp_chan1->isp_selection_timeout; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* now do per-channel settings */ isp_scsi_channel_init(isp, 0); if (IS_DUALBUS(isp)) isp_scsi_channel_init(isp, 1); /* * Now enable request/response queues */ if (IS_ULTRA2(isp) || IS_1240(isp)) { MBSINIT(&mbs, MBOX_INIT_RES_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = isp->isp_resodx = mbs.param[5]; MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } else { MBSINIT(&mbs, MBOX_INIT_RES_QUEUE, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = isp->isp_resodx = mbs.param[5]; MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } /* * Turn on LVD transitions for ULTRA2 or better and other features * * Now that we have 32 bit handles, don't do any fast posting * any more. For Ultra2/Ultra3 cards, we can turn on 32 bit RIO * operation or use fast posting. To be conservative, we'll only * do this for Ultra3 cards now because the other cards are so * rare for this author to find and test with. 
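The feature setup that follows accumulates FW_FEATURE_* bits per board capability and only issues MBOX_SET_FW_FEATURES when at least one bit was selected, sparing boards that want nothing the mailbox round trip. That accumulate-then-test shape in miniature, with invented bit values:

#include <stdio.h>
#include <stdint.h>

#define FEAT_LVD_NOTIFY 0x0002   // hypothetical bit values
#define FEAT_RIO_32BIT  0x0004

int main(void)
{
        int is_ultra2 = 1, is_ultra3 = 0;
        uint16_t feat = 0;

        if (is_ultra2)
                feat |= FEAT_LVD_NOTIFY;
        if (is_ultra3)
                feat |= FEAT_RIO_32BIT;
        if (feat != 0)                  // only talk to the chip if needed
                printf("enable features 0x%04x\n", feat);
        else
                printf("nothing to enable\n");
        return (0);
}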
	 */
	MBSINIT(&mbs, MBOX_SET_FW_FEATURES, MBLOGALL, 0);
	if (IS_ULTRA2(isp))
		mbs.param[1] |= FW_FEATURE_LVD_NOTIFY;
#ifdef	ISP_NO_RIO
	if (IS_ULTRA3(isp))
		mbs.param[1] |= FW_FEATURE_FAST_POST;
#else
	if (IS_ULTRA3(isp))
		mbs.param[1] |= FW_FEATURE_RIO_32BIT;
#endif
	if (mbs.param[1] != 0) {
		uint16_t sfeat = mbs.param[1];
		isp_mboxcmd(isp, &mbs);
		if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
			isp_prt(isp, ISP_LOGINFO,
			    "Enabled FW features (0x%x)", sfeat);
		}
	}

	isp->isp_state = ISP_RUNSTATE;
}

static void
isp_scsi_channel_init(ispsoftc_t *isp, int chan)
{
	sdparam *sdp;
	mbreg_t mbs;
	int tgt;

	sdp = SDPARAM(isp, chan);

	/*
	 * Set (possibly new) Initiator ID.
	 */
	MBSINIT(&mbs, MBOX_SET_INIT_SCSI_ID, MBLOGALL, 0);
	mbs.param[1] = (chan << 7) | sdp->isp_initiator_id;
	isp_mboxcmd(isp, &mbs);
	if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
		return;
	}
	isp_prt(isp, ISP_LOGINFO, "Chan %d Initiator ID is %d",
	    chan, sdp->isp_initiator_id);

	/*
	 * Set current per-target parameters to an initial safe minimum.
	 */
	for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
		int lun;
		uint16_t sdf;

		if (sdp->isp_devparam[tgt].dev_enable == 0) {
			continue;
		}
#ifndef	ISP_TARGET_MODE
		sdf = sdp->isp_devparam[tgt].goal_flags;
		sdf &= DPARM_SAFE_DFLT;
		/*
		 * It is not quite clear when this changed over so that
		 * we could force narrow and async for 1000/1020 cards,
		 * but assume that this is only the case for loaded
		 * firmware.
		 */
		if (isp->isp_loaded_fw) {
			sdf |= DPARM_NARROW | DPARM_ASYNC;
		}
#else
		/*
		 * The !$*!)$!$)* f/w uses the same index into some
		 * internal table to decide how to respond to negotiations,
		 * so if we've said "let's be safe" for ID X, and ID X
		 * selects *us*, the negotiations will go back to 'safe'
		 * (as in narrow/async). What the f/w *should* do is
		 * use the initiator id settings to decide how to respond.
		 */
		sdp->isp_devparam[tgt].goal_flags = sdf = DPARM_DEFAULT;
#endif
		MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGNONE, 0);
		mbs.param[1] = (chan << 15) | (tgt << 8);
		mbs.param[2] = sdf;
		if ((sdf & DPARM_SYNC) == 0) {
			mbs.param[3] = 0;
		} else {
			mbs.param[3] =
			    (sdp->isp_devparam[tgt].goal_offset << 8) |
			    (sdp->isp_devparam[tgt].goal_period);
		}
		isp_prt(isp, ISP_LOGDEBUG0,
		    "Initial Settings bus%d tgt%d flags 0x%x off 0x%x per 0x%x",
		    chan, tgt, mbs.param[2], mbs.param[3] >> 8,
		    mbs.param[3] & 0xff);
		isp_mboxcmd(isp, &mbs);
		if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
			sdf = DPARM_SAFE_DFLT;
			MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGALL, 0);
			mbs.param[1] = (tgt << 8) | (chan << 15);
			mbs.param[2] = sdf;
			mbs.param[3] = 0;
			isp_mboxcmd(isp, &mbs);
			if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
				continue;
			}
		}

		/*
		 * We don't update any information directly from the f/w
		 * because we need to run at least one command to cause a
		 * new state to be latched up. So, we just assume that we
		 * converge to the values we just had set.
		 *
		 * Ensure that we don't believe tagged queuing is enabled yet.
		 * It turns out that sometimes the ISP just ignores our
		 * attempts to set parameters for devices that it hasn't
		 * seen yet.
		 */
		sdp->isp_devparam[tgt].actv_flags = sdf & ~DPARM_TQING;
		for (lun = 0; lun < (int) isp->isp_maxluns; lun++) {
			MBSINIT(&mbs, MBOX_SET_DEV_QUEUE_PARAMS, MBLOGALL, 0);
			mbs.param[1] = (chan << 15) | (tgt << 8) | lun;
			mbs.param[2] = sdp->isp_max_queue_depth;
			mbs.param[3] = sdp->isp_devparam[tgt].exc_throttle;
			isp_mboxcmd(isp, &mbs);
			if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
				break;
			}
		}
	}
	for (tgt = 0; tgt < MAX_TARGETS; tgt++) {
		if (sdp->isp_devparam[tgt].dev_refresh) {
			sdp->sendmarker = 1;
			sdp->update = 1;
			break;
		}
	}
}

/*
 * Fibre Channel specific initialization.
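 *
 * In outline (summarizing the function below): build an isp_icb_t in a
 * local buffer, sanity-check the NVRAM-derived fields in it, copy it
 * into the DMA-able scratch area, and hand that address to the firmware
 * with MBOX_INIT_FIRMWARE.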
 */
static void
isp_fibre_init(ispsoftc_t *isp)
{
	fcparam *fcp;
	isp_icb_t local, *icbp = &local;
	mbreg_t mbs;

	/*
	 * We only support one channel on non-24XX cards
	 */
	fcp = FCPARAM(isp, 0);
	if (fcp->role == ISP_ROLE_NONE)
		return;

	isp->isp_state = ISP_INITSTATE;
	ISP_MEMZERO(icbp, sizeof (*icbp));
	icbp->icb_version = ICB_VERSION1;
	icbp->icb_fwoptions = fcp->isp_fwoptions;

	/*
	 * Firmware Options are either retrieved from NVRAM or
	 * are patched elsewhere. We check them for sanity here
	 * and make changes based on board revision, but otherwise
	 * let others decide policy.
	 */

	/*
	 * If this is a 2100 < revision 5, we have to turn off FAIRNESS.
	 */
	if (IS_2100(isp) && isp->isp_revision < 5) {
		icbp->icb_fwoptions &= ~ICBOPT_FAIRNESS;
	}

	/*
	 * We have to use FULL LOGIN even though it resets the loop too much
	 * because otherwise port database entries don't get updated after
	 * a LIP- this is a known f/w bug for 2100 f/w less than 1.17.0.
	 */
	if (!ISP_FW_NEWER_THAN(isp, 1, 17, 0)) {
		icbp->icb_fwoptions |= ICBOPT_FULL_LOGIN;
	}

	/*
	 * Insist on Port Database Update Async notifications
	 */
	icbp->icb_fwoptions |= ICBOPT_PDBCHANGE_AE;

	/*
	 * Make sure that target role reflects into fwoptions.
	 */
	if (fcp->role & ISP_ROLE_TARGET) {
		icbp->icb_fwoptions |= ICBOPT_TGT_ENABLE;
	} else {
		icbp->icb_fwoptions &= ~ICBOPT_TGT_ENABLE;
	}

	/*
	 * For some reason my 2200 does not generate ATIOs in target mode
	 * if initiator is disabled. Extra logins are better than the
	 * target not working at all.
	 */
	if ((fcp->role & ISP_ROLE_INITIATOR) || IS_2100(isp) || IS_2200(isp)) {
		icbp->icb_fwoptions &= ~ICBOPT_INI_DISABLE;
	} else {
		icbp->icb_fwoptions |= ICBOPT_INI_DISABLE;
	}

	icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp);
	if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN ||
	    icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) {
		isp_prt(isp, ISP_LOGERR,
		    "bad frame length (%d) from NVRAM- using %d",
		    DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN);
		icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN;
	}
	icbp->icb_maxalloc = fcp->isp_maxalloc;
	if (icbp->icb_maxalloc < 1) {
		isp_prt(isp, ISP_LOGERR,
		    "bad maximum allocation (%d)- using 16",
		    fcp->isp_maxalloc);
		icbp->icb_maxalloc = 16;
	}
	icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp);
	if (icbp->icb_execthrottle < 1) {
		isp_prt(isp, ISP_LOGERR,
		    "bad execution throttle of %d- using %d",
		    DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE);
		icbp->icb_execthrottle = ICB_DFLT_THROTTLE;
	}
	icbp->icb_retry_delay = fcp->isp_retry_delay;
	icbp->icb_retry_count = fcp->isp_retry_count;
	if (fcp->isp_loopid < LOCAL_LOOP_LIM) {
		icbp->icb_hardaddr = fcp->isp_loopid;
		if (isp->isp_confopts & ISP_CFG_OWNLOOPID)
			icbp->icb_fwoptions |= ICBOPT_HARD_ADDRESS;
		else
			icbp->icb_fwoptions |= ICBOPT_PREV_ADDRESS;
	}

	/*
	 * Right now we just set extended options to prefer point-to-point
	 * over loop based upon some soft config options.
	 *
	 * NB: for the 2300, ICBOPT_EXTENDED is required.
	 */
	if (IS_2100(isp)) {
		/*
		 * We can't have Fast Posting any more- we now
		 * have 32 bit handles.
		 */
		icbp->icb_fwoptions &= ~ICBOPT_FAST_POST;
	} else if (IS_2200(isp) || IS_23XX(isp)) {
		icbp->icb_fwoptions |= ICBOPT_EXTENDED;

		icbp->icb_xfwoptions = fcp->isp_xfwoptions;

		if (ISP_CAP_FCTAPE(isp)) {
			if (isp->isp_confopts & ISP_CFG_NOFCTAPE)
				icbp->icb_xfwoptions &= ~ICBXOPT_FCTAPE;

			if (isp->isp_confopts & ISP_CFG_FCTAPE)
				icbp->icb_xfwoptions |= ICBXOPT_FCTAPE;

			if (icbp->icb_xfwoptions & ICBXOPT_FCTAPE) {
				icbp->icb_fwoptions &= ~ICBOPT_FULL_LOGIN;	/* per documents */
				icbp->icb_xfwoptions |= ICBXOPT_FCTAPE_CCQ|ICBXOPT_FCTAPE_CONFIRM;
				FCPARAM(isp, 0)->fctape_enabled = 1;
			} else {
				FCPARAM(isp, 0)->fctape_enabled = 0;
			}
		} else {
			icbp->icb_xfwoptions &= ~ICBXOPT_FCTAPE;
			FCPARAM(isp, 0)->fctape_enabled = 0;
		}

		/*
		 * Prefer or force Point-To-Point instead of Loop?
		 */
		switch (isp->isp_confopts & ISP_CFG_PORT_PREF) {
-		case ISP_CFG_NPORT:
+		case ISP_CFG_LPORT_ONLY:
			icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
-			icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP;
+			icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY;
			break;
		case ISP_CFG_NPORT_ONLY:
			icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
			icbp->icb_xfwoptions |= ICBXOPT_PTP_ONLY;
			break;
-		case ISP_CFG_LPORT_ONLY:
+		case ISP_CFG_LPORT:
			icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
-			icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY;
+			icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP;
			break;
+		case ISP_CFG_NPORT:
+			icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
+			icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP;
+			break;
		default:
-			/*
-			 * Let NVRAM settings define it if they are sane
-			 */
+			/* Let NVRAM settings define it if they are sane */
			switch (icbp->icb_xfwoptions & ICBXOPT_TOPO_MASK) {
			case ICBXOPT_PTP_2_LOOP:
			case ICBXOPT_PTP_ONLY:
			case ICBXOPT_LOOP_ONLY:
			case ICBXOPT_LOOP_2_PTP:
				break;
			default:
				icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK;
				icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP;
			}
			break;
		}
		if (IS_2200(isp)) {
			/*
			 * We can't have Fast Posting any more- we now
			 * have 32 bit handles.
			 *
			 * RIO seemed to have too much breakage.
			 *
			 * Just opt for safety.
			 */
			icbp->icb_xfwoptions &= ~ICBXOPT_RIO_16BIT;
			icbp->icb_fwoptions &= ~ICBOPT_FAST_POST;
		} else {
			/*
			 * QLogic recommends that FAST Posting be turned
			 * off for 23XX cards and instead allow the HBA
			 * to write response queue entries and interrupt
			 * after a delay (ZIO).
			 */
			icbp->icb_fwoptions &= ~ICBOPT_FAST_POST;
			if ((fcp->isp_xfwoptions & ICBXOPT_TIMER_MASK) ==
			    ICBXOPT_ZIO) {
				icbp->icb_xfwoptions |= ICBXOPT_ZIO;
				icbp->icb_idelaytimer = 10;
			}
			icbp->icb_zfwoptions = fcp->isp_zfwoptions;
			if (isp->isp_confopts & ISP_CFG_1GB) {
				icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK;
				icbp->icb_zfwoptions |= ICBZOPT_RATE_1GB;
			} else if (isp->isp_confopts & ISP_CFG_2GB) {
				icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK;
				icbp->icb_zfwoptions |= ICBZOPT_RATE_2GB;
			} else {
				switch (icbp->icb_zfwoptions & ICBZOPT_RATE_MASK) {
				case ICBZOPT_RATE_1GB:
				case ICBZOPT_RATE_2GB:
				case ICBZOPT_RATE_AUTO:
					break;
				default:
					icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK;
					icbp->icb_zfwoptions |= ICBZOPT_RATE_AUTO;
					break;
				}
			}
		}
	}

	/*
	 * For 22XX > 2.1.26 && 23XX, set some options.
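	 *
	 * Per the version gates that follow: the MBOX_SET_FIRMWARE_OPTIONS
	 * command itself is only issued for firmware newer than 2.26.0;
	 * IFCOPT1_EQFQASYNC and IFCOPT1_CTIO_RETRY require 3.16.0, and for
	 * the target role IFCOPT3_NOPRLI is set under the same 3.16.0 gate
	 * while IFCOPT1_ENAPURE additionally requires 3.25.0.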
*/ if (ISP_FW_NEWER_THAN(isp, 2, 26, 0)) { MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = IFCOPT1_DISF7SWTCH|IFCOPT1_LIPASYNC|IFCOPT1_LIPF8; mbs.param[2] = 0; mbs.param[3] = 0; if (ISP_FW_NEWER_THAN(isp, 3, 16, 0)) { mbs.param[1] |= IFCOPT1_EQFQASYNC|IFCOPT1_CTIO_RETRY; if (fcp->role & ISP_ROLE_TARGET) { if (ISP_FW_NEWER_THAN(isp, 3, 25, 0)) { mbs.param[1] |= IFCOPT1_ENAPURE; } mbs.param[3] = IFCOPT3_NOPRLI; } } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } } icbp->icb_logintime = ICB_LOGIN_TOV; #ifdef ISP_TARGET_MODE if (icbp->icb_fwoptions & ICBOPT_TGT_ENABLE) { icbp->icb_lunenables = 0xffff; icbp->icb_ccnt = 0xff; icbp->icb_icnt = 0xff; icbp->icb_lunetimeout = ICB_LUN_ENABLE_TOV; } #endif if (fcp->isp_wwnn && fcp->isp_wwpn) { icbp->icb_fwoptions |= ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else if (fcp->isp_wwpn) { icbp->icb_fwoptions &= ~ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad request queue length"); } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad result queue length"); } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); if (FC_SCRATCH_ACQUIRE(isp, 0)) { isp_prt(isp, ISP_LOGERR, sacq); return; } isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init: fwopt 0x%x xfwopt 0x%x zfwopt 0x%x", icbp->icb_fwoptions, icbp->icb_xfwoptions, icbp->icb_zfwoptions); isp_put_icb(isp, icbp, (isp_icb_t *)fcp->isp_scratch); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init", sizeof(*icbp), fcp->isp_scratch); } /* * Init the firmware */ MBSINIT(&mbs, MBOX_INIT_FIRMWARE, MBLOGALL, 30000000); mbs.param[1] = 0; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %p (%08x%08x)", fcp->isp_scratch, (uint32_t) ((uint64_t)fcp->isp_scdma >> 32), (uint32_t) fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp), 0); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) return; isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; isp->isp_resodx = 0; /* * Whatever happens, we're now committed to being here. 
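	 * The ICB was accepted by the firmware above (we returned early on
	 * any mailbox failure) and the queue indices were just reset, so
	 * from here on the driver treats the card as running.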
*/ isp->isp_state = ISP_RUNSTATE; } static void isp_fibre_init_2400(ispsoftc_t *isp) { fcparam *fcp; isp_icb_2400_t local, *icbp = &local; mbreg_t mbs; int chan; /* * Check to see whether all channels have *some* kind of role */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role != ISP_ROLE_NONE) { break; } } if (chan == isp->isp_nchan) { isp_prt(isp, ISP_LOG_WARN1, "all %d channels with role 'none'", chan); return; } isp->isp_state = ISP_INITSTATE; /* * Start with channel 0. */ fcp = FCPARAM(isp, 0); /* * Turn on LIP F8 async event (1) */ MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = 1; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } ISP_MEMZERO(icbp, sizeof (*icbp)); icbp->icb_fwoptions1 = fcp->isp_fwoptions; icbp->icb_fwoptions2 = fcp->isp_xfwoptions; icbp->icb_fwoptions3 = fcp->isp_zfwoptions; if (isp->isp_nchan > 1 && ISP_CAP_VP0(isp)) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; } else { if (fcp->role & ISP_ROLE_TARGET) icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; else icbp->icb_fwoptions1 &= ~ICB2400_OPT1_TGT_ENABLE; if (fcp->role & ISP_ROLE_INITIATOR) icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; else icbp->icb_fwoptions1 |= ICB2400_OPT1_INI_DISABLE; } icbp->icb_version = ICB_VERSION1; icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1) { isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } /* * Set target exchange count. Take half if we are supporting both roles. 
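	 * For example (names from this file): with isp_maxcmds of 2048 and
	 * both roles enabled, the right-shift below leaves icb_xchgcnt at
	 * 1024; a target-only configuration keeps the full 2048.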
*/ if (icbp->icb_fwoptions1 & ICB2400_OPT1_TGT_ENABLE) { icbp->icb_xchgcnt = isp->isp_maxcmds; if ((icbp->icb_fwoptions1 & ICB2400_OPT1_INI_DISABLE) == 0) icbp->icb_xchgcnt >>= 1; } if (fcp->isp_loopid < LOCAL_LOOP_LIM) { icbp->icb_hardaddr = fcp->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) icbp->icb_fwoptions1 |= ICB2400_OPT1_HARD_ADDRESS; else icbp->icb_fwoptions1 |= ICB2400_OPT1_PREV_ADDRESS; } if (isp->isp_confopts & ISP_CFG_NOFCTAPE) { icbp->icb_fwoptions2 &= ~ICB2400_OPT2_FCTAPE; } if (isp->isp_confopts & ISP_CFG_FCTAPE) { icbp->icb_fwoptions2 |= ICB2400_OPT2_FCTAPE; } for (chan = 0; chan < isp->isp_nchan; chan++) { if (icbp->icb_fwoptions2 & ICB2400_OPT2_FCTAPE) FCPARAM(isp, chan)->fctape_enabled = 1; else FCPARAM(isp, chan)->fctape_enabled = 0; } switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { - case ISP_CFG_NPORT_ONLY: - icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; - icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY; - break; case ISP_CFG_LPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_ONLY; break; - default: + case ISP_CFG_NPORT_ONLY: + icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; + icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY; + break; + case ISP_CFG_NPORT: /* ISP_CFG_PTP_2_LOOP not available in 24XX/25XX */ + case ISP_CFG_LPORT: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP; break; + default: + /* Let NVRAM settings define it if they are sane */ + switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TOPO_MASK) { + case ICB2400_OPT2_LOOP_ONLY: + case ICB2400_OPT2_PTP_ONLY: + case ICB2400_OPT2_LOOP_2_PTP: + break; + default: + icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; + icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP; + } + break; } switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK) { case ICB2400_OPT2_ZIO: case ICB2400_OPT2_ZIO1: icbp->icb_idelaytimer = 0; break; case 0: break; default: isp_prt(isp, ISP_LOGWARN, "bad value %x in fwopt2 timer field", icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK); icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TIMER_MASK; break; } if (IS_26XX(isp)) { /* We don't support MSI-X yet, so set this unconditionally. 
*/ icbp->icb_fwoptions2 |= ICB2400_OPT2_ENA_IHR; icbp->icb_fwoptions2 |= ICB2400_OPT2_ENA_IHA; } if ((icbp->icb_fwoptions3 & ICB2400_OPT3_RSPSZ_MASK) == 0) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RSPSZ_24; } if (isp->isp_confopts & ISP_CFG_1GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_1GB; } else if (isp->isp_confopts & ISP_CFG_2GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_2GB; } else if (isp->isp_confopts & ISP_CFG_4GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_4GB; } else if (isp->isp_confopts & ISP_CFG_8GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_8GB; } else if (isp->isp_confopts & ISP_CFG_16GB) { icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_16GB; } else { switch (icbp->icb_fwoptions3 & ICB2400_OPT3_RATE_MASK) { case ICB2400_OPT3_RATE_4GB: case ICB2400_OPT3_RATE_8GB: case ICB2400_OPT3_RATE_16GB: case ICB2400_OPT3_RATE_AUTO: break; case ICB2400_OPT3_RATE_2GB: if (isp->isp_type <= ISP_HA_FC_2500) break; /*FALLTHROUGH*/ case ICB2400_OPT3_RATE_1GB: if (isp->isp_type <= ISP_HA_FC_2400) break; /*FALLTHROUGH*/ default: icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_MASK; icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_AUTO; break; } } icbp->icb_logintime = ICB_LOGIN_TOV; if (fcp->isp_wwnn && fcp->isp_wwpn) { icbp->icb_fwoptions1 |= ICB2400_OPT1_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else if (fcp->isp_wwpn) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node to be same as Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad request queue length %d", icbp->icb_rqstqlen); return; } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad result queue length %d", icbp->icb_rsltqlen); return; } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); #ifdef ISP_TARGET_MODE /* unconditionally set up the ATIO queue if we support target mode */ icbp->icb_atioqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_atioqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad ATIO queue length %d", icbp->icb_atioqlen); return; } icbp->icb_atioqaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_atioq_dma); 
icbp->icb_atioqaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_atioq_dma); isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: atioq %04x%04x%04x%04x", DMA_WD3(isp->isp_atioq_dma), DMA_WD2(isp->isp_atioq_dma), DMA_WD1(isp->isp_atioq_dma), DMA_WD0(isp->isp_atioq_dma)); #endif isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", icbp->icb_fwoptions1, icbp->icb_fwoptions2, icbp->icb_fwoptions3); isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: rqst %04x%04x%04x%04x rsp %04x%04x%04x%04x", DMA_WD3(isp->isp_rquest_dma), DMA_WD2(isp->isp_rquest_dma), DMA_WD1(isp->isp_rquest_dma), DMA_WD0(isp->isp_rquest_dma), DMA_WD3(isp->isp_result_dma), DMA_WD2(isp->isp_result_dma), DMA_WD1(isp->isp_result_dma), DMA_WD0(isp->isp_result_dma)); if (FC_SCRATCH_ACQUIRE(isp, 0)) { isp_prt(isp, ISP_LOGERR, sacq); return; } ISP_MEMZERO(fcp->isp_scratch, ISP_FC_SCRLEN); isp_put_icb_2400(isp, icbp, fcp->isp_scratch); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init_2400", sizeof (*icbp), fcp->isp_scratch); } /* * Now fill in information about any additional channels */ if (isp->isp_nchan > 1) { isp_icb_2400_vpinfo_t vpinfo, *vdst; vp_port_info_t pi, *pdst; size_t amt = 0; uint8_t *off; vpinfo.vp_global_options = ICB2400_VPGOPT_GEN_RIDA; if (ISP_CAP_VP0(isp)) { vpinfo.vp_global_options |= ICB2400_VPGOPT_VP0_DECOUPLE; vpinfo.vp_count = isp->isp_nchan; chan = 0; } else { vpinfo.vp_count = isp->isp_nchan - 1; chan = 1; } off = fcp->isp_scratch; off += ICB2400_VPINFO_OFF; vdst = (isp_icb_2400_vpinfo_t *) off; isp_put_icb_2400_vpinfo(isp, &vpinfo, vdst); amt = ICB2400_VPINFO_OFF + sizeof (isp_icb_2400_vpinfo_t); for (; chan < isp->isp_nchan; chan++) { fcparam *fcp2; ISP_MEMZERO(&pi, sizeof (pi)); fcp2 = FCPARAM(isp, chan); if (fcp2->role != ISP_ROLE_NONE) { pi.vp_port_options = ICB2400_VPOPT_ENABLED | ICB2400_VPOPT_ENA_SNSLOGIN; if (fcp2->role & ISP_ROLE_INITIATOR) pi.vp_port_options |= ICB2400_VPOPT_INI_ENABLE; if ((fcp2->role & ISP_ROLE_TARGET) == 0) pi.vp_port_options |= ICB2400_VPOPT_TGT_DISABLE; } if (fcp2->isp_loopid < LOCAL_LOOP_LIM) { pi.vp_port_loopid = fcp2->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) pi.vp_port_options |= ICB2400_VPOPT_HARD_ADDRESS; else pi.vp_port_options |= ICB2400_VPOPT_PREV_ADDRESS; } MAKE_NODE_NAME_FROM_WWN(pi.vp_port_portname, fcp2->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(pi.vp_port_nodename, fcp2->isp_wwnn); off = fcp->isp_scratch; if (ISP_CAP_VP0(isp)) off += ICB2400_VPINFO_PORT_OFF(chan); else off += ICB2400_VPINFO_PORT_OFF(chan - 1); pdst = (vp_port_info_t *) off; isp_put_vp_port_info(isp, &pi, pdst); amt += ICB2400_VPOPT_WRITE_SIZE; } if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init_2400", amt - ICB2400_VPINFO_OFF, (char *)fcp->isp_scratch + ICB2400_VPINFO_OFF); } } /* * Init the firmware */ MBSINIT(&mbs, 0, MBLOGALL, 30000000); if (isp->isp_nchan > 1) { mbs.param[0] = MBOX_INIT_FIRMWARE_MULTI_ID; } else { mbs.param[0] = MBOX_INIT_FIRMWARE; } mbs.param[1] = 0; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %04x%04x%04x%04x", DMA_WD3(fcp->isp_scdma), DMA_WD2(fcp->isp_scdma), DMA_WD1(fcp->isp_scdma), DMA_WD0(fcp->isp_scdma)); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp), 0); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; 
isp->isp_resodx = 0; isp->isp_atioodx = 0; /* * Whatever happens, we're now committed to being here. */ isp->isp_state = ISP_RUNSTATE; } static int isp_fc_enable_vp(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); vp_modify_t vp; void *reqp; uint8_t resp[QENTRY_LEN]; /* Build a VP MODIFY command in memory */ ISP_MEMZERO(&vp, sizeof(vp)); vp.vp_mod_hdr.rqs_entry_type = RQSTYPE_VP_MODIFY; vp.vp_mod_hdr.rqs_entry_count = 1; vp.vp_mod_cnt = 1; vp.vp_mod_idx0 = chan; vp.vp_mod_cmd = VP_MODIFY_ENA; vp.vp_mod_ports[0].options = ICB2400_VPOPT_ENABLED | ICB2400_VPOPT_ENA_SNSLOGIN; if (fcp->role & ISP_ROLE_INITIATOR) vp.vp_mod_ports[0].options |= ICB2400_VPOPT_INI_ENABLE; if ((fcp->role & ISP_ROLE_TARGET) == 0) vp.vp_mod_ports[0].options |= ICB2400_VPOPT_TGT_DISABLE; if (fcp->isp_loopid < LOCAL_LOOP_LIM) { vp.vp_mod_ports[0].loopid = fcp->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) vp.vp_mod_ports[0].options |= ICB2400_VPOPT_HARD_ADDRESS; else vp.vp_mod_ports[0].options |= ICB2400_VPOPT_PREV_ADDRESS; } MAKE_NODE_NAME_FROM_WWN(vp.vp_mod_ports[0].wwpn, fcp->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(vp.vp_mod_ports[0].wwnn, fcp->isp_wwnn); /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); vp.vp_mod_hdl = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (vp.vp_mod_hdl == 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d out of handles", __func__, chan); return (EIO); } /* Send request and wait for response. */ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, vp.vp_mod_hdl); return (EIO); } isp_put_vp_modify(isp, &vp, (vp_modify_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_MODIFY", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "VP_MODIFY", 5*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, vp.vp_mod_hdl); return (EIO); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_MODIFY response", QENTRY_LEN, resp); isp_get_vp_modify(isp, (vp_modify_t *)resp, &vp); if (vp.vp_mod_hdr.rqs_flags != 0 || vp.vp_mod_status != VP_STS_OK) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d failed with flags %x status %d", __func__, chan, vp.vp_mod_hdr.rqs_flags, vp.vp_mod_status); return (EIO); } return (0); } static int isp_fc_disable_vp(ispsoftc_t *isp, int chan) { vp_ctrl_info_t vp; void *reqp; uint8_t resp[QENTRY_LEN]; /* Build a VP CTRL command in memory */ ISP_MEMZERO(&vp, sizeof(vp)); vp.vp_ctrl_hdr.rqs_entry_type = RQSTYPE_VP_CTRL; vp.vp_ctrl_hdr.rqs_entry_count = 1; if (ISP_CAP_VP0(isp)) { vp.vp_ctrl_status = 1; } else { vp.vp_ctrl_status = 0; chan--; /* VP0 can not be controlled in this case. */ } vp.vp_ctrl_command = VP_CTRL_CMD_DISABLE_VP_LOGO_ALL; vp.vp_ctrl_vp_count = 1; vp.vp_ctrl_idmap[chan / 16] |= (1 << chan % 16); /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); vp.vp_ctrl_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (vp.vp_ctrl_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d out of handles", __func__, chan); return (EIO); } /* Send request and wait for response. 
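	 *
	 * This is the synchronous IOCB pattern used throughout this file:
	 * allocate a handle bound to a response buffer, copy the IOCB onto
	 * the request queue, hand it to the chip with ISP_SYNC_REQUEST(),
	 * then msleep() on the buffer until the interrupt path fills it in
	 * and wakes us, or the 5*hz timeout fires.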
*/ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, vp.vp_ctrl_handle); return (EIO); } isp_put_vp_ctrl_info(isp, &vp, (vp_ctrl_info_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_CTRL", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "VP_CTRL", 5*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, vp.vp_ctrl_handle); return (EIO); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB VP_CTRL response", QENTRY_LEN, resp); isp_get_vp_ctrl_info(isp, (vp_ctrl_info_t *)resp, &vp); if (vp.vp_ctrl_hdr.rqs_flags != 0 || vp.vp_ctrl_status != 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d failed with flags %x status %d %d", __func__, chan, vp.vp_ctrl_hdr.rqs_flags, vp.vp_ctrl_status, vp.vp_ctrl_index_fail); return (EIO); } return (0); } static int isp_fc_change_role(ispsoftc_t *isp, int chan, int new_role) { fcparam *fcp = FCPARAM(isp, chan); int i, was, res = 0; if (chan >= isp->isp_nchan) { isp_prt(isp, ISP_LOGWARN, "%s: bad channel %d", __func__, chan); return (ENXIO); } if (fcp->role == new_role) return (0); for (was = 0, i = 0; i < isp->isp_nchan; i++) { if (FCPARAM(isp, i)->role != ISP_ROLE_NONE) was++; } if (was == 0 || (was == 1 && fcp->role != ISP_ROLE_NONE)) { fcp->role = new_role; return (isp_reinit(isp, 0)); } if (fcp->role != ISP_ROLE_NONE) { res = isp_fc_disable_vp(isp, chan); isp_clear_portdb(isp, chan); } fcp->role = new_role; if (fcp->role != ISP_ROLE_NONE) res = isp_fc_enable_vp(isp, chan); return (res); } static void isp_clear_portdb(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; switch (lp->state) { case FC_PORTDB_STATE_DEAD: case FC_PORTDB_STATE_CHANGED: case FC_PORTDB_STATE_VALID: lp->state = FC_PORTDB_STATE_NIL; isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); break; case FC_PORTDB_STATE_NIL: case FC_PORTDB_STATE_NEW: lp->state = FC_PORTDB_STATE_NIL; break; case FC_PORTDB_STATE_ZOMBIE: break; default: panic("Don't know how to clear state %d\n", lp->state); } } } static void isp_mark_portdb(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->portid >= DOMAIN_CONTROLLER_BASE && lp->portid <= DOMAIN_CONTROLLER_END) continue; fcp->portdb[i].probational = 1; } } /* * Perform an IOCB PLOGI or LOGO via EXECUTE IOCB A64 for 24XX cards * or via FABRIC LOGIN/FABRIC LOGOUT for other cards. */ static int isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, int flags) { isp_plogx_t pl; void *reqp; uint8_t resp[QENTRY_LEN]; uint32_t sst, parm1; int rval, lev; const char *msg; char buf[64]; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d PLOGX %s PortID 0x%06x nphdl 0x%x", chan, (flags & PLOGX_FLG_CMD_MASK) == PLOGX_FLG_CMD_PLOGI ? 
"Login":"Logout", portid, handle); if (!IS_24XX(isp)) { int action = flags & PLOGX_FLG_CMD_MASK; if (action == PLOGX_FLG_CMD_PLOGI) { return (isp_port_login(isp, handle, portid)); } else if (action == PLOGX_FLG_CMD_LOGO) { return (isp_port_logout(isp, handle, portid)); } else { return (MBOX_INVALID_COMMAND); } } ISP_MEMZERO(&pl, sizeof(pl)); pl.plogx_header.rqs_entry_count = 1; pl.plogx_header.rqs_entry_type = RQSTYPE_LOGIN; pl.plogx_nphdl = handle; pl.plogx_vphdl = chan; pl.plogx_portlo = portid; pl.plogx_rspsz_porthi = (portid >> 16) & 0xff; pl.plogx_flags = flags; /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); pl.plogx_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (pl.plogx_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: PLOGX of Chan %d out of handles", __func__, chan); return (-1); } /* Send request and wait for response. */ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: PLOGX of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, pl.plogx_handle); return (-1); } isp_put_plogx(isp, &pl, (isp_plogx_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB LOGX", QENTRY_LEN, reqp); + FCPARAM(isp, chan)->isp_login_hdl = handle; ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "PLOGX", 3 * ICB_LOGIN_TOV * hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: PLOGX of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, pl.plogx_handle); return (-1); } + FCPARAM(isp, chan)->isp_login_hdl = NIL_HANDLE; if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "IOCB LOGX response", QENTRY_LEN, resp); isp_get_plogx(isp, (isp_plogx_t *)resp, &pl); if (pl.plogx_status == PLOGX_STATUS_OK) { return (0); } else if (pl.plogx_status != PLOGX_STATUS_IOCBERR) { isp_prt(isp, ISP_LOGWARN, "status 0x%x on port login IOCB channel %d", pl.plogx_status, chan); return (-1); } sst = pl.plogx_ioparm[0].lo16 | (pl.plogx_ioparm[0].hi16 << 16); parm1 = pl.plogx_ioparm[1].lo16 | (pl.plogx_ioparm[1].hi16 << 16); rval = -1; lev = ISP_LOGERR; msg = NULL; switch (sst) { case PLOGX_IOCBERR_NOLINK: msg = "no link"; break; case PLOGX_IOCBERR_NOIOCB: msg = "no IOCB buffer"; break; case PLOGX_IOCBERR_NOXGHG: msg = "no Exchange Control Block"; break; case PLOGX_IOCBERR_FAILED: ISP_SNPRINTF(buf, sizeof (buf), "reason 0x%x (last LOGIN state 0x%x)", parm1 & 0xff, (parm1 >> 8) & 0xff); msg = buf; break; case PLOGX_IOCBERR_NOFABRIC: msg = "no fabric"; break; case PLOGX_IOCBERR_NOTREADY: msg = "firmware not ready"; break; case PLOGX_IOCBERR_NOLOGIN: ISP_SNPRINTF(buf, sizeof (buf), "not logged in (last state 0x%x)", parm1); msg = buf; rval = MBOX_NOT_LOGGED_IN; break; case PLOGX_IOCBERR_REJECT: ISP_SNPRINTF(buf, sizeof (buf), "LS_RJT = 0x%x", parm1); msg = buf; break; case PLOGX_IOCBERR_NOPCB: msg = "no PCB allocated"; break; case PLOGX_IOCBERR_EINVAL: ISP_SNPRINTF(buf, sizeof (buf), "invalid parameter at offset 0x%x", parm1); msg = buf; break; case PLOGX_IOCBERR_PORTUSED: lev = ISP_LOG_SANCFG|ISP_LOG_WARN1; ISP_SNPRINTF(buf, sizeof (buf), "already logged in with N-Port handle 0x%x", parm1); msg = buf; rval = MBOX_PORT_ID_USED | (parm1 << 16); break; case PLOGX_IOCBERR_HNDLUSED: lev = ISP_LOG_SANCFG|ISP_LOG_WARN1; ISP_SNPRINTF(buf, sizeof (buf), "handle already used for PortID 0x%06x", parm1); msg = buf; rval = MBOX_LOOP_ID_USED; break; case PLOGX_IOCBERR_NOHANDLE: msg = "no handle allocated"; break; case PLOGX_IOCBERR_NOFLOGI: msg = "no FLOGI_ACC"; break; default: ISP_SNPRINTF(buf, sizeof (buf), "status %x from %x", 
pl.plogx_status, flags); msg = buf; break; } if (msg) { isp_prt(isp, ISP_LOGERR, "Chan %d PLOGX PortID 0x%06x to N-Port handle 0x%x: %s", chan, portid, handle, msg); } return (rval); } static int isp_port_login(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; MBSINIT(&mbs, MBOX_FABRIC_LOGIN, MBLOGNONE, 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } mbs.param[2] = portid >> 16; mbs.param[3] = portid; mbs.logval = MBLOGNONE; mbs.timeout = 500000; isp_mboxcmd(isp, &mbs); switch (mbs.param[0]) { case MBOX_PORT_ID_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: portid 0x%06x already logged in as 0x%x", portid, mbs.param[1]); return (MBOX_PORT_ID_USED | (mbs.param[1] << 16)); case MBOX_LOOP_ID_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: handle 0x%x in use for port id 0x%02xXXXX", handle, mbs.param[1] & 0xff); return (MBOX_LOOP_ID_USED); case MBOX_COMMAND_COMPLETE: return (0); case MBOX_COMMAND_ERROR: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: error 0x%x in PLOGI to port 0x%06x", mbs.param[1], portid); return (MBOX_COMMAND_ERROR); case MBOX_ALL_IDS_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: all IDs used for fabric login"); return (MBOX_ALL_IDS_USED); default: isp_prt(isp, ISP_LOG_SANCFG, "isp_port_login: error 0x%x on port login of 0x%06x@0x%0x", mbs.param[0], portid, handle); return (mbs.param[0]); } } /* * Pre-24XX fabric port logout * * Note that portid is not used */ static int isp_port_logout(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; MBSINIT(&mbs, MBOX_FABRIC_LOGOUT, MBLOGNONE, 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } isp_mboxcmd(isp, &mbs); return (mbs.param[0] == MBOX_COMMAND_COMPLETE? 
0 : mbs.param[0]); } static int isp_getpdb(ispsoftc_t *isp, int chan, uint16_t id, isp_pdb_t *pdb) { mbreg_t mbs; union { isp_pdb_21xx_t fred; isp_pdb_24xx_t bill; } un; MBSINIT(&mbs, MBOX_GET_PORT_DB, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_PARAM_ERROR), 250000); if (IS_24XX(isp)) { mbs.ibits = (1 << 9)|(1 << 10); mbs.param[1] = id; mbs.param[9] = chan; } else if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = id; } else { mbs.param[1] = id << 8; } mbs.param[2] = DMA_WD1(isp->isp_iocb_dma); mbs.param[3] = DMA_WD0(isp->isp_iocb_dma); mbs.param[6] = DMA_WD3(isp->isp_iocb_dma); mbs.param[7] = DMA_WD2(isp->isp_iocb_dma); MEMORYBARRIER(isp, SYNC_IFORDEV, 0, sizeof(un), chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) return (mbs.param[0] | (mbs.param[1] << 16)); MEMORYBARRIER(isp, SYNC_IFORCPU, 0, sizeof(un), chan); if (IS_24XX(isp)) { isp_get_pdb_24xx(isp, isp->isp_iocb, &un.bill); pdb->handle = un.bill.pdb_handle; pdb->prli_word3 = un.bill.pdb_prli_svc3; pdb->portid = BITS2WORD_24XX(un.bill.pdb_portid_bits); ISP_MEMCPY(pdb->portname, un.bill.pdb_portname, 8); ISP_MEMCPY(pdb->nodename, un.bill.pdb_nodename, 8); isp_prt(isp, ISP_LOGDEBUG1, "Chan %d handle 0x%x Port 0x%06x flags 0x%x curstate %x", chan, id, pdb->portid, un.bill.pdb_flags, un.bill.pdb_curstate); if (un.bill.pdb_curstate < PDB2400_STATE_PLOGI_DONE || un.bill.pdb_curstate > PDB2400_STATE_LOGGED_IN) { mbs.param[0] = MBOX_NOT_LOGGED_IN; return (mbs.param[0]); } } else { isp_get_pdb_21xx(isp, isp->isp_iocb, &un.fred); pdb->handle = un.fred.pdb_loopid; pdb->prli_word3 = un.fred.pdb_prli_svc3; pdb->portid = BITS2WORD(un.fred.pdb_portid_bits); ISP_MEMCPY(pdb->portname, un.fred.pdb_portname, 8); ISP_MEMCPY(pdb->nodename, un.fred.pdb_nodename, 8); isp_prt(isp, ISP_LOGDEBUG1, "Chan %d handle 0x%x Port 0x%06x", chan, id, pdb->portid); } return (0); } static int isp_gethandles(ispsoftc_t *isp, int chan, uint16_t *handles, int *num, int loop) { fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; isp_pnhle_21xx_t el1, *elp1; isp_pnhle_23xx_t el3, *elp3; isp_pnhle_24xx_t el4, *elp4; int i, j; uint32_t p; uint16_t h; MBSINIT(&mbs, MBOX_GET_ID_LIST, MBLOGALL, 250000); if (IS_24XX(isp)) { mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); mbs.param[8] = ISP_FC_SCRLEN; mbs.param[9] = chan; } else { mbs.ibits = (1 << 1)|(1 << 2)|(1 << 3)|(1 << 6); mbs.param[1] = DMA_WD1(fcp->isp_scdma); mbs.param[2] = DMA_WD0(fcp->isp_scdma); mbs.param[3] = DMA_WD3(fcp->isp_scdma); mbs.param[6] = DMA_WD2(fcp->isp_scdma); } if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } MEMORYBARRIER(isp, SYNC_SFORDEV, 0, ISP_FC_SCRLEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); return (mbs.param[0] | (mbs.param[1] << 16)); } MEMORYBARRIER(isp, SYNC_SFORCPU, 0, ISP_FC_SCRLEN, chan); elp1 = fcp->isp_scratch; elp3 = fcp->isp_scratch; elp4 = fcp->isp_scratch; for (i = 0, j = 0; i < mbs.param[1] && j < *num; i++) { if (IS_24XX(isp)) { isp_get_pnhle_24xx(isp, &elp4[i], &el4); p = el4.pnhle_port_id_lo | (el4.pnhle_port_id_hi << 16); h = el4.pnhle_handle; } else if (IS_23XX(isp)) { isp_get_pnhle_23xx(isp, &elp3[i], &el3); p = el3.pnhle_port_id_lo | (el3.pnhle_port_id_hi << 16); h = el3.pnhle_handle; } else { /* 21xx */ isp_get_pnhle_21xx(isp, &elp1[i], &el1); p = el1.pnhle_port_id_lo | ((el1.pnhle_port_id_hi_handle & 0xff) << 16); h = el1.pnhle_port_id_hi_handle >> 8; } if 
(loop && (p >> 8) != (fcp->isp_portid >> 8)) continue; handles[j++] = h; } *num = j; FC_SCRATCH_RELEASE(isp, chan); return (0); } static void isp_dump_chip_portdb(ispsoftc_t *isp, int chan) { isp_pdb_t pdb; uint16_t lim, nphdl; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d chip port dump", chan); if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } for (nphdl = 0; nphdl != lim; nphdl++) { if (isp_getpdb(isp, chan, nphdl, &pdb)) { continue; } isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d Handle 0x%04x " "PortID 0x%06x WWPN 0x%02x%02x%02x%02x%02x%02x%02x%02x", chan, nphdl, pdb.portid, pdb.portname[0], pdb.portname[1], pdb.portname[2], pdb.portname[3], pdb.portname[4], pdb.portname[5], pdb.portname[6], pdb.portname[7]); } } static uint64_t isp_get_wwn(ispsoftc_t *isp, int chan, int nphdl, int nodename) { uint64_t wwn = INI_NONE; mbreg_t mbs; MBSINIT(&mbs, MBOX_GET_PORT_NAME, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_PARAM_ERROR), 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = nphdl; if (nodename) { mbs.param[10] = 1; } mbs.param[9] = chan; } else { mbs.ibitm = 3; mbs.param[1] = nphdl << 8; if (nodename) { mbs.param[1] |= 1; } } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (wwn); } if (IS_24XX(isp)) { wwn = (((uint64_t)(mbs.param[2] >> 8)) << 56) | (((uint64_t)(mbs.param[2] & 0xff)) << 48) | (((uint64_t)(mbs.param[3] >> 8)) << 40) | (((uint64_t)(mbs.param[3] & 0xff)) << 32) | (((uint64_t)(mbs.param[6] >> 8)) << 24) | (((uint64_t)(mbs.param[6] & 0xff)) << 16) | (((uint64_t)(mbs.param[7] >> 8)) << 8) | (((uint64_t)(mbs.param[7] & 0xff))); } else { wwn = (((uint64_t)(mbs.param[2] & 0xff)) << 56) | (((uint64_t)(mbs.param[2] >> 8)) << 48) | (((uint64_t)(mbs.param[3] & 0xff)) << 40) | (((uint64_t)(mbs.param[3] >> 8)) << 32) | (((uint64_t)(mbs.param[6] & 0xff)) << 24) | (((uint64_t)(mbs.param[6] >> 8)) << 16) | (((uint64_t)(mbs.param[7] & 0xff)) << 8) | (((uint64_t)(mbs.param[7] >> 8))); } return (wwn); } /* * Make sure we have good FC link. */ static int isp_fclink_test(ispsoftc_t *isp, int chan, int usdelay) { mbreg_t mbs; int i, r; uint16_t nphdl; fcparam *fcp; isp_pdb_t pdb; NANOTIME_T hra, hrb; fcp = FCPARAM(isp, chan); if (fcp->isp_loopstate < LOOP_HAVE_LINK) return (-1); if (fcp->isp_loopstate >= LOOP_LTEST_DONE) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test", chan); fcp->isp_loopstate = LOOP_TESTING_LINK; /* * Wait up to N microseconds for F/W to go to a ready state. */ GET_NANOTIME(&hra); while (1) { isp_change_fw_state(isp, chan, isp_fw_state(isp, chan)); if (fcp->isp_fwstate == FW_READY) { break; } if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; GET_NANOTIME(&hrb); if ((NANOTIME_SUB(&hrb, &hra) / 1000 + 1000 >= usdelay)) break; ISP_SLEEP(isp, 1000); } if (fcp->isp_fwstate != FW_READY) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Firmware is not ready (%s)", chan, isp_fc_fw_statename(fcp->isp_fwstate)); return (-1); } /* * Get our Loop ID and Port ID. */ MBSINIT(&mbs, MBOX_GET_LOOP_ID, MBLOGALL, 0); mbs.param[9] = chan; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (-1); } if (IS_2100(isp)) { /* * Don't bother with fabric if we are using really old * 2100 firmware. It's just not worth it. 
*/ if (ISP_FW_NEWER_THAN(isp, 1, 15, 37)) fcp->isp_topo = TOPO_FL_PORT; else fcp->isp_topo = TOPO_NL_PORT; } else { int topo = (int) mbs.param[6]; if (topo < TOPO_NL_PORT || topo > TOPO_PTP_STUB) { topo = TOPO_PTP_STUB; } fcp->isp_topo = topo; } fcp->isp_portid = mbs.param[2] | (mbs.param[3] << 16); if (!TOPO_IS_FABRIC(fcp->isp_topo)) { fcp->isp_loopid = mbs.param[1] & 0xff; } else if (fcp->isp_topo != TOPO_F_PORT) { uint8_t alpa = fcp->isp_portid; for (i = 0; alpa_map[i]; i++) { if (alpa_map[i] == alpa) break; } if (alpa_map[i]) fcp->isp_loopid = i; } if (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT) { nphdl = IS_24XX(isp) ? NPH_FL_ID : FL_ID; r = isp_getpdb(isp, chan, nphdl, &pdb); if (r != 0 || pdb.portid == 0) { if (IS_2100(isp)) { fcp->isp_topo = TOPO_NL_PORT; } else { isp_prt(isp, ISP_LOGWARN, "fabric topology, but cannot get info about fabric controller (0x%x)", r); fcp->isp_topo = TOPO_PTP_STUB; } goto not_on_fabric; } if (IS_24XX(isp)) { fcp->isp_fabric_params = mbs.param[7]; fcp->isp_sns_hdl = NPH_SNS_ID; r = isp_register_fc4_type_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; if (r != 0) goto not_on_fabric; r = isp_register_fc4_features_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; if (r != 0) goto not_on_fabric; r = isp_register_port_name_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; if (r != 0) goto not_on_fabric; isp_register_node_name_24xx(isp, chan); if (fcp->isp_loopstate < LOOP_TESTING_LINK) goto abort; } else { fcp->isp_sns_hdl = SNS_ID; r = isp_register_fc4_type(isp, chan); if (r != 0) goto not_on_fabric; if (fcp->role == ISP_ROLE_TARGET) isp_send_change_request(isp, chan); } } not_on_fabric: /* Get link speed. */ fcp->isp_gbspeed = 1; if (IS_23XX(isp) || IS_24XX(isp)) { MBSINIT(&mbs, MBOX_GET_SET_DATA_RATE, MBLOGALL, 3000000); mbs.param[1] = MBGSD_GET_RATE; /* mbs.param[2] undefined if we're just getting rate */ isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { if (mbs.param[1] == MBGSD_10GB) fcp->isp_gbspeed = 10; else if (mbs.param[1] == MBGSD_16GB) fcp->isp_gbspeed = 16; else if (mbs.param[1] == MBGSD_8GB) fcp->isp_gbspeed = 8; else if (mbs.param[1] == MBGSD_4GB) fcp->isp_gbspeed = 4; else if (mbs.param[1] == MBGSD_2GB) fcp->isp_gbspeed = 2; else if (mbs.param[1] == MBGSD_1GB) fcp->isp_gbspeed = 1; } } if (fcp->isp_loopstate < LOOP_TESTING_LINK) { abort: isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test aborted", chan); return (1); } fcp->isp_loopstate = LOOP_LTEST_DONE; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGCONFIG, "Chan %d WWPN %016jx WWNN %016jx", chan, (uintmax_t)fcp->isp_wwpn, (uintmax_t)fcp->isp_wwnn); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGCONFIG, "Chan %d %dGb %s PortID 0x%06x LoopID 0x%02x", chan, fcp->isp_gbspeed, isp_fc_toponame(fcp), fcp->isp_portid, fcp->isp_loopid); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test done", chan); return (0); } /* * Complete the synchronization of our Port Database. * * At this point, we've scanned the local loop (if any) and the fabric * and performed fabric logins on all new devices. * * Our task here is to go through our port database removing any entities * that are still marked probational (issuing PLOGO for ones which we had * PLOGI'd into) or are dead, and notifying upper layers about new/changed * devices. 
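 *
 * In summary, the transitions driven below are:
 *	DEAD -> NIL, announcing ISPASYNC_DEV_GONE (plus an implicit LOGO
 *	    when the entry was not autologin);
 *	NEW -> VALID, announcing ISPASYNC_DEV_ARRIVED;
 *	CHANGED -> VALID, announcing ISPASYNC_DEV_CHANGED and latching
 *	    the new portid and prli_word3;
 *	VALID -> VALID, announcing ISPASYNC_DEV_STAYED.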
*/ static int isp_pdb_sync(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; uint16_t dbidx; if (fcp->isp_loopstate < LOOP_FSCAN_DONE) return (-1); if (fcp->isp_loopstate >= LOOP_READY) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync", chan); fcp->isp_loopstate = LOOP_SYNCING_PDB; for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->probational && lp->state != FC_PORTDB_STATE_ZOMBIE) lp->state = FC_PORTDB_STATE_DEAD; switch (lp->state) { case FC_PORTDB_STATE_DEAD: lp->state = FC_PORTDB_STATE_NIL; isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); if (lp->autologin == 0) { (void) isp_plogx(isp, chan, lp->handle, lp->portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL); } /* * Note that we might come out of this with our state * set to FC_PORTDB_STATE_ZOMBIE. */ break; case FC_PORTDB_STATE_NEW: lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_ARRIVED, chan, lp); break; case FC_PORTDB_STATE_CHANGED: lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_CHANGED, chan, lp); lp->portid = lp->new_portid; lp->prli_word3 = lp->new_prli_word3; break; case FC_PORTDB_STATE_VALID: isp_async(isp, ISPASYNC_DEV_STAYED, chan, lp); break; case FC_PORTDB_STATE_ZOMBIE: break; default: isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: state %d for idx %d", lp->state, dbidx); isp_dump_portdb(isp, chan); } } if (fcp->isp_loopstate < LOOP_SYNCING_PDB) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync aborted", chan); return (1); } fcp->isp_loopstate = LOOP_READY; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync done", chan); return (0); } static void isp_pdb_add_update(ispsoftc_t *isp, int chan, isp_pdb_t *pdb) { fcportdb_t *lp; uint64_t wwnn, wwpn; MAKE_WWN_FROM_NODE_NAME(wwnn, pdb->nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb->portname); /* Search port database for the same WWPN. */ if (isp_find_pdb_by_wwpn(isp, chan, wwpn, &lp)) { if (!lp->probational) { isp_prt(isp, ISP_LOGERR, "Chan %d Port 0x%06x@0x%04x [%d] is not probational (0x%x)", chan, lp->portid, lp->handle, FC_PORTDB_TGT(isp, chan, lp), lp->state); isp_dump_portdb(isp, chan); return; } lp->probational = 0; lp->node_wwn = wwnn; /* Old device, nothing new. */ if (lp->portid == pdb->portid && lp->handle == pdb->handle && lp->prli_word3 == pdb->prli_word3) { if (lp->state != FC_PORTDB_STATE_NEW) lp->state = FC_PORTDB_STATE_VALID; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is valid", chan, pdb->portid, pdb->handle); return; } /* Something has changed. */ lp->state = FC_PORTDB_STATE_CHANGED; lp->handle = pdb->handle; lp->new_portid = pdb->portid; lp->new_prli_word3 = pdb->prli_word3; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is changed", chan, pdb->portid, pdb->handle); return; } /* It seems like a new port. Find an empty slot for it. */ if (!isp_find_pdb_empty(isp, chan, &lp)) { isp_prt(isp, ISP_LOGERR, "Chan %d out of portdb entries", chan); return; } ISP_MEMZERO(lp, sizeof (fcportdb_t)); lp->autologin = 1; lp->probational = 0; lp->state = FC_PORTDB_STATE_NEW; lp->portid = lp->new_portid = pdb->portid; lp->prli_word3 = lp->new_prli_word3 = pdb->prli_word3; lp->handle = pdb->handle; lp->port_wwn = wwpn; lp->node_wwn = wwnn; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x@0x%04x is new", chan, pdb->portid, pdb->handle); } /* * Fix port IDs for logged-in initiators on pre-2400 chips. 
 * For those chips we do not receive login events; we add initiators
 * based on ATIO requests instead, but there is no port ID in that
 * structure.
 */
static void
isp_fix_portids(ispsoftc_t *isp, int chan)
{
	fcparam *fcp = FCPARAM(isp, chan);
	isp_pdb_t pdb;
	uint64_t wwpn;
	int i, r;

	for (i = 0; i < MAX_FC_TARG; i++) {
		fcportdb_t *lp = &fcp->portdb[i];

		if (lp->state == FC_PORTDB_STATE_NIL ||
		    lp->state == FC_PORTDB_STATE_ZOMBIE)
			continue;
		if (VALID_PORT(lp->portid))
			continue;

		r = isp_getpdb(isp, chan, lp->handle, &pdb);
		if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
			return;
		if (r != 0) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "Chan %d FC Scan Loop handle %d returned %x",
			    chan, lp->handle, r);
			continue;
		}

		MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname);
		if (lp->port_wwn != wwpn)
			continue;
		lp->portid = lp->new_portid = pdb.portid;
		isp_prt(isp, ISP_LOG_SANCFG,
		    "Chan %d Port 0x%06x@0x%04x is fixed",
		    chan, pdb.portid, pdb.handle);
	}
}

/*
 * Scan local loop for devices.
 */
static int
isp_scan_loop(ispsoftc_t *isp, int chan)
{
	fcparam *fcp = FCPARAM(isp, chan);
	int idx, lim, r;
	isp_pdb_t pdb;
	uint16_t *handles;
	uint16_t handle;

	if (fcp->isp_loopstate < LOOP_LTEST_DONE)
		return (-1);
	if (fcp->isp_loopstate >= LOOP_LSCAN_DONE)
		return (0);

	isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan", chan);
	fcp->isp_loopstate = LOOP_SCANNING_LOOP;
	if (TOPO_IS_FABRIC(fcp->isp_topo)) {
		if (!IS_24XX(isp)) {
			isp_fix_portids(isp, chan);
			if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
				goto abort;
		}
		isp_prt(isp, ISP_LOG_SANCFG,
		    "Chan %d FC loop scan done (no loop)", chan);
		fcp->isp_loopstate = LOOP_LSCAN_DONE;
		return (0);
	}

	handles = (uint16_t *)fcp->isp_scanscratch;
	lim = ISP_FC_SCRLEN / 2;
	r = isp_gethandles(isp, chan, handles, &lim, 1);
	if (r != 0) {
		isp_prt(isp, ISP_LOG_SANCFG,
		    "Chan %d Getting list of handles failed with %x", chan, r);
		isp_prt(isp, ISP_LOG_SANCFG,
		    "Chan %d FC loop scan done (bad)", chan);
		return (-1);
	}

	isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Got %d handles", chan, lim);

	/*
	 * Run through the list and get the port database info for each one.
	 */
	isp_mark_portdb(isp, chan);
	for (idx = 0; idx < lim; idx++) {
		handle = handles[idx];

		/*
		 * Don't scan "special" ids.
		 */
		if (ISP_CAP_2KLOGIN(isp)) {
			if (handle >= NPH_RESERVED)
				continue;
		} else {
			if (handle >= FL_ID && handle <= SNS_ID)
				continue;
		}

		/*
		 * In older cards with older f/w GET_PORT_DATABASE has been
		 * known to hang. This trick gets around that problem.
		 */
		if (IS_2100(isp) || IS_2200(isp)) {
			uint64_t node_wwn = isp_get_wwn(isp, chan, handle, 1);
			if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) {
abort:
				isp_prt(isp, ISP_LOG_SANCFG,
				    "Chan %d FC loop scan aborted", chan);
				return (1);
			}
			if (node_wwn == INI_NONE) {
				continue;
			}
		}

		/*
		 * Get the port database entity for this index.
		 */
		r = isp_getpdb(isp, chan, handle, &pdb);
		if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
			goto abort;
		if (r != 0) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "Chan %d FC Scan Loop handle %d returned %x",
			    chan, handle, r);
			continue;
		}
		isp_pdb_add_update(isp, chan, &pdb);
	}
	if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
		goto abort;
	fcp->isp_loopstate = LOOP_LSCAN_DONE;
	isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan done", chan);
	return (0);
}

/*
 * Scan the fabric for devices and add them to our port database.
 *
 * Use the GID_FT command to get all Port IDs for FC4 SCSI devices that
 * the name server knows about.
 *
 * For 2100-23XX cards, we can use the SNS mailbox command to pass simple
 * name server commands to the switch management server via the QLogic f/w.
 *
 * For the 24XX card, we have to use CT Pass-Through run via the Execute
 * IOCB mailbox command.
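 *
 * Scratch-area layout assumed by the defines below: the last three
 * QENTRY_LEN slots hold the CT request (XTXOFF), the request IOCB
 * (CTXOFF) and the response IOCB (ZTXOFF); GIDLEN is everything before
 * them and receives the GID_FT response.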
*/ #define GIDLEN (ISP_FC_SCRLEN - (3 * QENTRY_LEN)) #define NGENT ((GIDLEN - 16) >> 2) #define XTXOFF (ISP_FC_SCRLEN - (3 * QENTRY_LEN)) /* CT request */ #define CTXOFF (ISP_FC_SCRLEN - (2 * QENTRY_LEN)) /* Request IOCB */ #define ZTXOFF (ISP_FC_SCRLEN - (1 * QENTRY_LEN)) /* Response IOCB */ static int isp_gid_ft_sns(ispsoftc_t *isp, int chan) { union { sns_gid_ft_req_t _x; uint8_t _y[SNS_GID_FT_REQ_SIZE]; } un; fcparam *fcp = FCPARAM(isp, chan); sns_gid_ft_req_t *rq = &un._x; uint8_t *scp = fcp->isp_scratch; mbreg_t mbs; isp_prt(isp, ISP_LOGDEBUG0, "Chan %d requesting GID_FT via SNS", chan); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } ISP_MEMZERO(rq, SNS_GID_FT_REQ_SIZE); rq->snscb_rblen = GIDLEN >> 1; rq->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma); rq->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma); rq->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma); rq->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma); rq->snscb_sblen = 6; rq->snscb_cmd = SNS_GID_FT; rq->snscb_mword_div_2 = NGENT; rq->snscb_fc4_type = FC4_SCSI; isp_put_gid_ft_request(isp, rq, (sns_gid_ft_req_t *)&scp[CTXOFF]); MEMORYBARRIER(isp, SYNC_SFORDEV, CTXOFF, SNS_GID_FT_REQ_SIZE, chan); MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 10000000); mbs.param[0] = MBOX_SEND_SNS; mbs.param[1] = SNS_GID_FT_REQ_SIZE >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma + CTXOFF); mbs.param[3] = DMA_WD0(fcp->isp_scdma + CTXOFF); mbs.param[6] = DMA_WD3(fcp->isp_scdma + CTXOFF); mbs.param[7] = DMA_WD2(fcp->isp_scdma + CTXOFF); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (mbs.param[0] == MBOX_INVALID_COMMAND) { return (1); } else { return (-1); } } MEMORYBARRIER(isp, SYNC_SFORCPU, 0, GIDLEN, chan); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT response", GIDLEN, scp); isp_get_gid_ft_response(isp, (sns_gid_ft_rsp_t *)scp, (sns_gid_ft_rsp_t *)fcp->isp_scanscratch, NGENT); FC_SCRATCH_RELEASE(isp, chan); return (0); } static int isp_ct_passthru(ispsoftc_t *isp, int chan, uint32_t cmd_bcnt, uint32_t rsp_bcnt) { fcparam *fcp = FCPARAM(isp, chan); isp_ct_pt_t pt; void *reqp; uint8_t resp[QENTRY_LEN]; /* * Build a Passthrough IOCB in memory. */ ISP_MEMZERO(&pt, sizeof(pt)); pt.ctp_header.rqs_entry_count = 1; pt.ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; pt.ctp_nphdl = fcp->isp_sns_hdl; pt.ctp_cmd_cnt = 1; pt.ctp_vpidx = ISP_GET_VPIDX(isp, chan); pt.ctp_time = 10; pt.ctp_rsp_cnt = 1; pt.ctp_rsp_bcnt = rsp_bcnt; pt.ctp_cmd_bcnt = cmd_bcnt; pt.ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma+XTXOFF); pt.ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma+XTXOFF); pt.ctp_dataseg[0].ds_count = cmd_bcnt; pt.ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma); pt.ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma); pt.ctp_dataseg[1].ds_count = rsp_bcnt; /* Prepare space for response in memory */ memset(resp, 0xff, sizeof(resp)); pt.ctp_handle = isp_allocate_handle(isp, resp, ISP_HANDLE_CTRL); if (pt.ctp_handle == 0) { isp_prt(isp, ISP_LOGERR, "%s: CTP of Chan %d out of handles", __func__, chan); return (-1); } /* Send request and wait for response. 
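	 *
	 * Per the IOCB built above: dataseg[0] points at the CT command at
	 * isp_scdma + XTXOFF, dataseg[1] at the response buffer at the
	 * start of the scratch area, and ctp_time (10 seconds) bounds the
	 * msleep() below.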
*/ reqp = isp_getrqentry(isp); if (reqp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: CTP of Chan %d out of rqent", __func__, chan); isp_destroy_handle(isp, pt.ctp_handle); return (-1); } isp_put_ct_pt(isp, &pt, (isp_ct_pt_t *)reqp); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT IOCB request", QENTRY_LEN, reqp); ISP_SYNC_REQUEST(isp); if (msleep(resp, &isp->isp_lock, 0, "CTP", pt.ctp_time*hz) == EWOULDBLOCK) { isp_prt(isp, ISP_LOGERR, "%s: CTP of Chan %d timed out", __func__, chan); isp_destroy_handle(isp, pt.ctp_handle); return (-1); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT IOCB response", QENTRY_LEN, resp); isp_get_ct_pt(isp, (isp_ct_pt_t *)resp, &pt); if (pt.ctp_status && pt.ctp_status != RQCS_DATA_UNDERRUN) { isp_prt(isp, ISP_LOGWARN, "Chan %d GID_FT CT Passthrough returned 0x%x", chan, pt.ctp_status); return (-1); } return (0); } static int isp_gid_ft_ct_passthru(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t ct; uint32_t *rp; uint8_t *scp = fcp->isp_scratch; isp_prt(isp, ISP_LOGDEBUG0, "Chan %d requesting GID_FT via CT", chan); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. */ ISP_MEMZERO(&ct, sizeof (ct)); ct.ct_revision = CT_REVISION; ct.ct_fcs_type = CT_FC_TYPE_FC; ct.ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct.ct_cmd_resp = SNS_GID_FT; ct.ct_bcnt_resid = (GIDLEN - 16) >> 2; isp_put_ct_hdr(isp, &ct, (ct_hdr_t *) &scp[XTXOFF]); rp = (uint32_t *) &scp[XTXOFF + sizeof(ct)]; ISP_IOZPUT_32(isp, FC4_SCSI, rp); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "CT request", sizeof(ct) + sizeof(uint32_t), &scp[XTXOFF]); } if (isp_ct_passthru(isp, chan, sizeof(ct) + sizeof(uint32_t), GIDLEN)) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT response", GIDLEN, scp); isp_get_gid_ft_response(isp, (sns_gid_ft_rsp_t *)scp, (sns_gid_ft_rsp_t *)fcp->isp_scanscratch, NGENT); FC_SCRATCH_RELEASE(isp, chan); return (0); } static int isp_scan_fabric(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; uint32_t portid; uint16_t nphdl; isp_pdb_t pdb; int portidx, portlim, r; sns_gid_ft_rsp_t *rs; if (fcp->isp_loopstate < LOOP_LSCAN_DONE) return (-1); if (fcp->isp_loopstate >= LOOP_FSCAN_DONE) return (0); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan", chan); fcp->isp_loopstate = LOOP_SCANNING_FABRIC; if (!TOPO_IS_FABRIC(fcp->isp_topo)) { fcp->isp_loopstate = LOOP_FSCAN_DONE; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done (no fabric)", chan); return (0); } if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { abort: FC_SCRATCH_RELEASE(isp, chan); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan aborted", chan); return (1); } /* * Make sure we still are logged into the fabric controller. */ nphdl = IS_24XX(isp) ? NPH_FL_ID : FL_ID; r = isp_getpdb(isp, chan, nphdl, &pdb); if ((r & 0xffff) == MBOX_NOT_LOGGED_IN) { isp_dump_chip_portdb(isp, chan); } if (r) { fcp->isp_loopstate = LOOP_LTEST_DONE; fail: isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done (bad)", chan); return (-1); } /* Get list of port IDs from SNS. 
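	 *
	 * Return convention of the two helpers called below: 0 means the
	 * response landed in isp_scanscratch; a positive value means the
	 * firmware rejected the command, so the fabric scan is marked
	 * done; a negative value backs the loop state off to
	 * LOOP_LTEST_DONE so the scan will be retried.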
*/ if (IS_24XX(isp)) r = isp_gid_ft_ct_passthru(isp, chan); else r = isp_gid_ft_sns(isp, chan); if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; if (r > 0) { fcp->isp_loopstate = LOOP_FSCAN_DONE; return (-1); } else if (r < 0) { fcp->isp_loopstate = LOOP_LTEST_DONE; /* try again */ return (-1); } rs = (sns_gid_ft_rsp_t *) fcp->isp_scanscratch; if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; if (rs->snscb_cthdr.ct_cmd_resp != LS_ACC) { int level; if (rs->snscb_cthdr.ct_reason == 9 && rs->snscb_cthdr.ct_explanation == 7) { level = ISP_LOG_SANCFG; } else { level = ISP_LOGWARN; } isp_prt(isp, level, "Chan %d Fabric Nameserver rejected GID_FT" " (Reason=0x%x Expl=0x%x)", chan, rs->snscb_cthdr.ct_reason, rs->snscb_cthdr.ct_explanation); fcp->isp_loopstate = LOOP_FSCAN_DONE; return (-1); } /* Check that our buffer was big enough to get the full list. */ for (portidx = 0; portidx < NGENT-1; portidx++) { if (rs->snscb_ports[portidx].control & 0x80) break; } if ((rs->snscb_ports[portidx].control & 0x80) == 0) { isp_prt(isp, ISP_LOGWARN, "fabric too big for scratch area: increase ISP_FC_SCRLEN"); } portlim = portidx + 1; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Got %d ports back from name server", chan, portlim); /* Go through the list and remove duplicate port ids. */ for (portidx = 0; portidx < portlim; portidx++) { int npidx; portid = ((rs->snscb_ports[portidx].portid[0]) << 16) | ((rs->snscb_ports[portidx].portid[1]) << 8) | ((rs->snscb_ports[portidx].portid[2])); for (npidx = portidx + 1; npidx < portlim; npidx++) { uint32_t new_portid = ((rs->snscb_ports[npidx].portid[0]) << 16) | ((rs->snscb_ports[npidx].portid[1]) << 8) | ((rs->snscb_ports[npidx].portid[2])); if (new_portid == portid) { break; } } if (npidx < portlim) { rs->snscb_ports[npidx].portid[0] = 0; rs->snscb_ports[npidx].portid[1] = 0; rs->snscb_ports[npidx].portid[2] = 0; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d removing duplicate PortID 0x%06x entry from list", chan, portid); } } /* * We now have a list of Port IDs for all FC4 SCSI devices * that the Fabric Name server knows about. * * For each entry on this list go through our port database looking * for probational entries- if we find one, an old entry may * still refer to this same device. We fetch some information to find out. * * Otherwise, it's a new fabric device, and we log into it * (unconditionally). After searching the entire database * again to make sure that we never ever ever ever have more * than one entry that has the same PortID or the same * WWNN/WWPN duple, we enter the device into our database. */ isp_mark_portdb(isp, chan); for (portidx = 0; portidx < portlim; portidx++) { portid = ((rs->snscb_ports[portidx].portid[0]) << 16) | ((rs->snscb_ports[portidx].portid[1]) << 8) | ((rs->snscb_ports[portidx].portid[2])); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Checking fabric port 0x%06x", chan, portid); if (portid == 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port at idx %d is zero", chan, portidx); continue; } if (portid == fcp->isp_portid) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x is ours", chan, portid); continue; } /* Now search the entire port database for the same portid. */ if (isp_find_pdb_by_portid(isp, chan, portid, &lp)) { if (!lp->probational) { isp_prt(isp, ISP_LOGERR, "Chan %d Port 0x%06x@0x%04x [%d] is not probational (0x%x)", chan, lp->portid, lp->handle, FC_PORTDB_TGT(isp, chan, lp), lp->state); isp_dump_portdb(isp, chan); goto fail; } /* * See if we're still logged into it.
* * If we aren't, mark it as a dead device and * leave the new portid in the database entry * for somebody further along to decide what to * do (policy choice). * * If we are, check to see if it's the same * device still (it should be). If for some * reason it isn't, mark it as a changed device * and leave the new portid and role in the * database entry for somebody further along to * decide what to do (policy choice). */ r = isp_getpdb(isp, chan, lp->handle, &pdb); if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; if (r != 0) { lp->state = FC_PORTDB_STATE_DEAD; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x handle 0x%x is dead (%d)", chan, portid, lp->handle, r); goto relogin; } isp_pdb_add_update(isp, chan, &pdb); continue; } relogin: if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Port 0x%06x is not logged in", chan, portid); continue; } if (isp_login_device(isp, chan, portid, &pdb, &FCPARAM(isp, 0)->isp_lasthdl)) { if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; continue; } isp_pdb_add_update(isp, chan, &pdb); } if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) goto abort; fcp->isp_loopstate = LOOP_FSCAN_DONE; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done", chan); return (0); } /* * Find an unused handle and try to use it to log in to a port. */ static int isp_login_device(ispsoftc_t *isp, int chan, uint32_t portid, isp_pdb_t *p, uint16_t *ohp) { int lim, i, r; uint16_t handle; if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } handle = isp_next_handle(isp, ohp); for (i = 0; i < lim; i++) { if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) return (-1); /* Check if this handle is free. */ r = isp_getpdb(isp, chan, handle, p); if (r == 0) { if (p->portid != portid) { /* This handle is busy, try next one. */ handle = isp_next_handle(isp, ohp); continue; } break; } if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) return (-1); /* * Now try to log in to the device. */ r = isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_PLOGI); if (r == 0) { break; } else if ((r & 0xffff) == MBOX_PORT_ID_USED) { /* * If we get here, then the firmware still thinks we're logged into this device, but with a different * handle. We need to break that association. We used to try just substituting the handle, but then * failed to get any data via isp_getpdb (below). */ if (isp_plogx(isp, chan, r >> 16, portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL)) { isp_prt(isp, ISP_LOGERR, "baw... logout of %x failed", r >> 16); } if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) return (-1); r = isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_PLOGI); if (r != 0) i = lim; break; } else if ((r & 0xffff) == MBOX_LOOP_ID_USED) { /* Try the next handle. */ handle = isp_next_handle(isp, ohp); } else { /* Give up.
*/ i = lim; break; } } if (i == lim) { isp_prt(isp, ISP_LOGWARN, "Chan %d PLOGI 0x%06x failed", chan, portid); return (-1); } /* * If we successfully logged into it, get the PDB for it * so we can crosscheck that it is still what we think it * is and that we also have the role it plays */ r = isp_getpdb(isp, chan, handle, p); if (r != 0) { isp_prt(isp, ISP_LOGERR, "Chan %d new device 0x%06x@0x%x disappeared", chan, portid, handle); return (-1); } if (p->handle != handle || p->portid != portid) { isp_prt(isp, ISP_LOGERR, "Chan %d new device 0x%06x@0x%x changed (0x%06x@0x%0x)", chan, portid, handle, p->portid, p->handle); return (-1); } return (0); } static int isp_send_change_request(ispsoftc_t *isp, int chan) { mbreg_t mbs; MBSINIT(&mbs, MBOX_SEND_CHANGE_REQUEST, MBLOGALL, 500000); mbs.param[1] = 0x03; mbs.param[9] = chan; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Send Change Request: 0x%x", chan, mbs.param[0]); return (-1); } } static int isp_register_fc4_type(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); uint8_t local[SNS_RFT_ID_REQ_SIZE]; sns_screq_t *reqp = (sns_screq_t *) local; mbreg_t mbs; ISP_MEMZERO((void *) reqp, SNS_RFT_ID_REQ_SIZE); reqp->snscb_rblen = SNS_RFT_ID_RESP_SIZE >> 1; reqp->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma + 0x100); reqp->snscb_sblen = 22; reqp->snscb_data[0] = SNS_RFT_ID; reqp->snscb_data[4] = fcp->isp_portid & 0xffff; reqp->snscb_data[5] = (fcp->isp_portid >> 16) & 0xff; reqp->snscb_data[6] = (1 << FC4_SCSI); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } isp_put_sns_request(isp, reqp, (sns_screq_t *) fcp->isp_scratch); MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 1000000); mbs.param[1] = SNS_RFT_ID_REQ_SIZE >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, SNS_RFT_ID_REQ_SIZE, chan); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, chan); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Type: 0x%x", chan, mbs.param[0]); return (-1); } } static int isp_register_fc4_type_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rft_id_t rp; uint8_t *scp = fcp->isp_scratch; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. 
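 *
 * In these name server CT requests, ct_bcnt_resid carries the
 * payload length after the 16-byte CT header in 32-bit words. As a
 * worked example (assuming rft_id_t is the CT header plus a 4-byte
 * port ID word and a 32-byte FC-4 types bitmap, i.e. 52 bytes in
 * all):
 *
 *	(sizeof (rft_id_t) - sizeof (ct_hdr_t)) >> 2 = (52 - 16) >> 2
 *	    = 9 words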
*/ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rftid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RFT_ID; ct->ct_bcnt_resid = (sizeof (rft_id_t) - sizeof (ct_hdr_t)) >> 2; rp.rftid_portid[0] = fcp->isp_portid >> 16; rp.rftid_portid[1] = fcp->isp_portid >> 8; rp.rftid_portid[2] = fcp->isp_portid; rp.rftid_fc4types[FC4_SCSI >> 5] = 1 << (FC4_SCSI & 0x1f); isp_put_rft_id(isp, &rp, (rft_id_t *)&scp[XTXOFF]); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT request", sizeof(rft_id_t), &scp[XTXOFF]); if (isp_ct_passthru(isp, chan, sizeof(rft_id_t), sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register FC4 Type rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register FC4 Type accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Type: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static int isp_register_fc4_features_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rff_id_t rp; uint8_t *scp = fcp->isp_scratch; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. */ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rffid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RFF_ID; ct->ct_bcnt_resid = (sizeof (rff_id_t) - sizeof (ct_hdr_t)) >> 2; rp.rffid_portid[0] = fcp->isp_portid >> 16; rp.rffid_portid[1] = fcp->isp_portid >> 8; rp.rffid_portid[2] = fcp->isp_portid; rp.rffid_fc4features = 0; if (fcp->role & ISP_ROLE_TARGET) rp.rffid_fc4features |= 1; if (fcp->role & ISP_ROLE_INITIATOR) rp.rffid_fc4features |= 2; rp.rffid_fc4type = FC4_SCSI; isp_put_rff_id(isp, &rp, (rff_id_t *)&scp[XTXOFF]); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT request", sizeof(rft_id_t), &scp[XTXOFF]); if (isp_ct_passthru(isp, chan, sizeof(rft_id_t), sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register FC4 Features rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register FC4 Features accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Features: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static int isp_register_port_name_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rspn_id_t rp; uint8_t *scp = fcp->isp_scratch; int len; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. */ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rspnid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RSPN_ID; rp.rspnid_portid[0] = fcp->isp_portid >> 16; rp.rspnid_portid[1] = fcp->isp_portid >> 8; rp.rspnid_portid[2] = fcp->isp_portid; rp.rspnid_length = 0; len = offsetof(rspn_id_t, rspnid_name); mtx_lock(&prison0.pr_mtx); rp.rspnid_length += sprintf(&scp[XTXOFF + len + rp.rspnid_length], "%s", prison0.pr_hostname[0] ? 
prison0.pr_hostname : "FreeBSD"); mtx_unlock(&prison0.pr_mtx); rp.rspnid_length += sprintf(&scp[XTXOFF + len + rp.rspnid_length], ":%s", device_get_nameunit(isp->isp_dev)); if (chan != 0) { rp.rspnid_length += sprintf(&scp[XTXOFF + len + rp.rspnid_length], "/%d", chan); } len += rp.rspnid_length; ct->ct_bcnt_resid = (len - sizeof(ct_hdr_t)) >> 2; isp_put_rspn_id(isp, &rp, (rspn_id_t *)&scp[XTXOFF]); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT request", len, &scp[XTXOFF]); if (isp_ct_passthru(isp, chan, len, sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register Symbolic Port Name rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register Symbolic Port Name accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register Symbolic Port Name: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static int isp_register_node_name_24xx(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); ct_hdr_t *ct; rsnn_nn_t rp; uint8_t *scp = fcp->isp_scratch; int len; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build the CT header and command in memory. */ ISP_MEMZERO(&rp, sizeof(rp)); ct = &rp.rsnnnn_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RSNN_NN; MAKE_NODE_NAME_FROM_WWN(rp.rsnnnn_nodename, fcp->isp_wwnn); rp.rsnnnn_length = 0; len = offsetof(rsnn_nn_t, rsnnnn_name); mtx_lock(&prison0.pr_mtx); rp.rsnnnn_length += sprintf(&scp[XTXOFF + len + rp.rsnnnn_length], "%s", prison0.pr_hostname[0] ? prison0.pr_hostname : "FreeBSD"); mtx_unlock(&prison0.pr_mtx); len += rp.rsnnnn_length; ct->ct_bcnt_resid = (len - sizeof(ct_hdr_t)) >> 2; isp_put_rsnn_nn(isp, &rp, (rsnn_nn_t *)&scp[XTXOFF]); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_bytes(isp, "CT request", len, &scp[XTXOFF]); if (isp_ct_passthru(isp, chan, len, sizeof(ct_hdr_t))) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } isp_get_ct_hdr(isp, (ct_hdr_t *) scp, ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register Symbolic Node Name rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register Symbolic Node Name accepted", chan); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register Symbolic Node Name: 0x%x", chan, ct->ct_cmd_resp); return (-1); } return (0); } static uint16_t isp_next_handle(ispsoftc_t *isp, uint16_t *ohp) { fcparam *fcp; int i, chan, wrap; uint16_t handle, minh, maxh; handle = *ohp; if (ISP_CAP_2KLOGIN(isp)) { minh = 0; maxh = NPH_RESERVED - 1; } else { minh = SNS_ID + 1; maxh = NPH_MAX - 1; } wrap = 0; next: if (handle == NIL_HANDLE) { handle = minh; } else { handle++; if (handle > maxh) { if (++wrap >= 2) { isp_prt(isp, ISP_LOGERR, "Out of port handles!"); return (NIL_HANDLE); } handle = minh; } } for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; for (i = 0; i < MAX_FC_TARG; i++) { if (fcp->portdb[i].state != FC_PORTDB_STATE_NIL && fcp->portdb[i].handle == handle) goto next; } } *ohp = handle; return (handle); } /* * Start a command. Locking is assumed done in the caller. 
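 *
 * The return value tells the platform code what became of the
 * command: CMD_QUEUED (accepted by the chip), CMD_COMPLETE (done
 * now, good or bad- check XS_ERR), CMD_EAGAIN (transient resource
 * shortage, retry soon) or CMD_RQLATER (link not ready, requeue
 * and retry later). A minimal caller sketch, illustrative only
 * (requeue() is a hypothetical helper; the real platform glue
 * lives elsewhere):
 *
 *	switch (isp_start(xs)) {
 *	case CMD_QUEUED:
 *		break;		// command now in flight
 *	case CMD_RQLATER:
 *	case CMD_EAGAIN:
 *		requeue(xs);	// hypothetical: hand back for retry
 *		break;
 *	case CMD_COMPLETE:
 *		isp_done(xs);	// error already set in xs
 *		break;
 *	}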
*/ int isp_start(XS_T *xs) { ispsoftc_t *isp; uint32_t cdblen; uint8_t local[QENTRY_LEN]; ispreq_t *reqp; void *cdbp, *qep; uint16_t *tptr; fcportdb_t *lp; int target, dmaresult; XS_INITERR(xs); isp = XS_ISP(xs); /* * Check command CDB length, etc.. We really are limited to 16 bytes * for Fibre Channel, but can do up to 44 bytes in parallel SCSI, * but probably only if we're running fairly new firmware (we'll * let the old f/w choke on an extended command queue entry). */ if (XS_CDBLEN(xs) > (IS_FC(isp)? 16 : 44) || XS_CDBLEN(xs) == 0) { isp_prt(isp, ISP_LOGERR, "unsupported cdb length (%d, CDB[0]=0x%x)", XS_CDBLEN(xs), XS_CDBP(xs)[0] & 0xff); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } /* * Translate the target to device handle as appropriate, checking * for correct device state as well. */ target = XS_TGT(xs); if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs)); if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx I am not an initiator", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } /* * Try again later. */ if (fcp->isp_loopstate != LOOP_READY) { return (CMD_RQLATER); } isp_prt(isp, ISP_LOGDEBUG2, "XS_TGT(xs)=%d", target); lp = &fcp->portdb[target]; if (target < 0 || target >= MAX_FC_TARG || lp->is_target == 0) { XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (lp->state == FC_PORTDB_STATE_ZOMBIE) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx target zombie", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); return (CMD_RQLATER); } if (lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx bad db port state 0x%x", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs), lp->state); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } } else { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } if (sdp->update) { isp_spi_update(isp, XS_CHANNEL(xs)); } lp = NULL; } start_again: qep = isp_getrqentry(isp); if (qep == NULL) { isp_prt(isp, ISP_LOG_WARN1, "Request Queue Overflow"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } XS_SETERR(xs, HBA_NOERROR); /* * Now see if we need to synchronize the ISP with respect to anything. * We do dual duty here (cough) for synchronizing for busses other * than which we got here to send a command to. */ reqp = (ispreq_t *) local; ISP_MEMZERO(local, QENTRY_LEN); if (ISP_TST_SENDMARKER(isp, XS_CHANNEL(xs))) { if (IS_24XX(isp)) { isp_marker_24xx_t *m = (isp_marker_24xx_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_modifier = SYNC_ALL; m->mrk_vphdl = XS_CHANNEL(xs); isp_put_marker_24xx(isp, m, qep); } else { isp_marker_t *m = (isp_marker_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_target = (XS_CHANNEL(xs) << 7); /* bus # */ m->mrk_modifier = SYNC_ALL; isp_put_marker(isp, m, qep); } ISP_SYNC_REQUEST(isp); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 0); goto start_again; } reqp->req_header.rqs_entry_count = 1; /* * Select and install Header Code. * Note that it might be overridden before going out * if we're on a 64 bit platform. The lower level * code (isp_send_cmd) will select the appropriate * 64 bit variant if it needs to. 
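 *
 * For reference, the selection below boils down to:
 *
 *	chip / condition	entry type
 *	----------------	----------------
 *	24XX			RQSTYPE_T7RQS
 *	other FC		RQSTYPE_T2RQS
 *	SCSI, CDB > 12 bytes	RQSTYPE_CMDONLY
 *	SCSI, otherwise		RQSTYPE_REQUEST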
*/ if (IS_24XX(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T7RQS; } else if (IS_FC(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T2RQS; } else { if (XS_CDBLEN(xs) > 12) { reqp->req_header.rqs_entry_type = RQSTYPE_CMDONLY; } else { reqp->req_header.rqs_entry_type = RQSTYPE_REQUEST; } } /* * Set task attributes */ if (IS_24XX(isp)) { int ttype; if (XS_TAG_P(xs)) { ttype = XS_TAG_TYPE(xs); } else { if (XS_CDBP(xs)[0] == 0x3) { ttype = REQFLAG_HTAG; } else { ttype = REQFLAG_STAG; } } if (ttype == REQFLAG_OTAG) { ttype = FCP_CMND_TASK_ATTR_ORDERED; } else if (ttype == REQFLAG_HTAG) { ttype = FCP_CMND_TASK_ATTR_HEAD; } else { ttype = FCP_CMND_TASK_ATTR_SIMPLE; } ((ispreqt7_t *)reqp)->req_task_attribute = ttype; } else if (IS_FC(isp)) { /* * See comment in isp_intr */ /* XS_SET_RESID(xs, 0); */ /* * Fibre Channel always requires some kind of tag. * The Qlogic drivers seem be happy not to use a tag, * but this breaks for some devices (IBM drives). */ if (XS_TAG_P(xs)) { ((ispreqt2_t *)reqp)->req_flags = XS_TAG_TYPE(xs); } else { /* * If we don't know what tag to use, use HEAD OF QUEUE * for Request Sense or Simple. */ if (XS_CDBP(xs)[0] == 0x3) /* REQUEST SENSE */ ((ispreqt2_t *)reqp)->req_flags = REQFLAG_HTAG; else ((ispreqt2_t *)reqp)->req_flags = REQFLAG_STAG; } } else { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); if ((sdp->isp_devparam[target].actv_flags & DPARM_TQING) && XS_TAG_P(xs)) { reqp->req_flags = XS_TAG_TYPE(xs); } } tptr = &reqp->req_time; /* * NB: we do not support long CDBs (yet) */ cdblen = XS_CDBLEN(xs); if (IS_SCSI(isp)) { if (cdblen > sizeof (reqp->req_cdb)) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } reqp->req_target = target | (XS_CHANNEL(xs) << 7); reqp->req_lun_trn = XS_LUN(xs); cdbp = reqp->req_cdb; reqp->req_cdblen = cdblen; } else if (IS_24XX(isp)) { ispreqt7_t *t7 = (ispreqt7_t *)local; if (cdblen > sizeof (t7->req_cdb)) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } t7->req_nphdl = lp->handle; t7->req_tidlo = lp->portid; t7->req_tidhi = lp->portid >> 16; t7->req_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(xs)); #if __FreeBSD_version >= 1000700 be64enc(t7->req_lun, CAM_EXTLUN_BYTE_SWIZZLE(XS_LUN(xs))); #else if (XS_LUN(xs) >= 256) { t7->req_lun[0] = XS_LUN(xs) >> 8; t7->req_lun[0] |= 0x40; } t7->req_lun[1] = XS_LUN(xs); #endif if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) { if (FCP_NEXT_CRN(isp, &t7->req_crn, xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx cannot generate next CRN", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } } tptr = &t7->req_time; cdbp = t7->req_cdb; } else { ispreqt2_t *t2 = (ispreqt2_t *)local; if (cdblen > sizeof t2->req_cdb) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) { if (FCP_NEXT_CRN(isp, &t2->req_crn, xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx cannot generate next CRN", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } } if (ISP_CAP_2KLOGIN(isp)) { ispreqt2e_t *t2e = (ispreqt2e_t *)local; t2e->req_target = lp->handle; t2e->req_scclun = XS_LUN(xs); #if __FreeBSD_version < 1000700 if (XS_LUN(xs) >= 256) t2e->req_scclun |= 0x4000; #endif cdbp = t2e->req_cdb; } 
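/*
 * A worked example of the LUN encodings in the branches above and
 * below, for kernels without extended LUN support
 * (__FreeBSD_version < 1000700): LUN 300 (0x12c) is sent in flat
 * address format, so req_lun[0] = (300 >> 8) | 0x40 = 0x41 and
 * req_lun[1] = 300 & 0xff = 0x2c for type 7 requests, while the
 * 16-bit SCC LUN field becomes req_scclun = 300 | 0x4000 = 0x412c.
 * Newer kernels let CAM_EXTLUN_BYTE_SWIZZLE() produce the wire
 * format instead.
 */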
else if (ISP_CAP_SCCFW(isp)) { ispreqt2_t *t2 = (ispreqt2_t *)local; t2->req_target = lp->handle; t2->req_scclun = XS_LUN(xs); #if __FreeBSD_version < 1000700 if (XS_LUN(xs) >= 256) t2->req_scclun |= 0x4000; #endif cdbp = t2->req_cdb; } else { t2->req_target = lp->handle; t2->req_lun_trn = XS_LUN(xs); cdbp = t2->req_cdb; } } ISP_MEMCPY(cdbp, XS_CDBP(xs), cdblen); *tptr = (XS_TIME(xs) + 999) / 1000; if (IS_24XX(isp) && *tptr > 0x1999) { *tptr = 0x1999; } /* Whew. Thankfully the same for type 7 requests */ reqp->req_handle = isp_allocate_handle(isp, xs, ISP_HANDLE_INITIATOR); if (reqp->req_handle == 0) { isp_prt(isp, ISP_LOG_WARN1, "out of xflist pointers"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } /* * Set up DMA and/or do any platform dependent swizzling of the request entry * so that the Qlogic F/W understands what is being asked of it. * * The callee is responsible for adding all requests at this point. */ dmaresult = ISP_DMASETUP(isp, xs, reqp); if (dmaresult != CMD_QUEUED) { isp_destroy_handle(isp, reqp->req_handle); /* * dmasetup sets actual error in packet, and * return what we were given to return. */ return (dmaresult); } isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "START cmd cdb[0]=0x%x datalen %ld", XS_CDBP(xs)[0], (long) XS_XFRLEN(xs)); isp->isp_nactive++; return (CMD_QUEUED); } /* * isp control * Locks (ints blocked) assumed held. */ int isp_control(ispsoftc_t *isp, ispctl_t ctl, ...) { XS_T *xs; mbreg_t *mbr, mbs; int chan, tgt; uint32_t handle; va_list ap; switch (ctl) { case ISPCTL_RESET_BUS: /* * Issue a bus reset. */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "BUS RESET NOT IMPLEMENTED"); break; } else if (IS_FC(isp)) { mbs.param[1] = 10; chan = 0; } else { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); mbs.param[1] = SDPARAM(isp, chan)->isp_bus_reset_delay; if (mbs.param[1] < 2) { mbs.param[1] = 2; } mbs.param[2] = chan; } MBSINIT(&mbs, MBOX_BUS_RESET, MBLOGALL, 0); ISP_SET_SENDMARKER(isp, chan, 1); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, "driver initiated bus reset of bus %d", chan); return (0); case ISPCTL_RESET_DEV: va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); va_end(ap); if (IS_24XX(isp)) { uint8_t local[QENTRY_LEN]; isp24xx_tmf_t *tmf; isp24xx_statusreq_t *sp; fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; if (tgt < 0 || tgt >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "Chan %d trying to reset bad target %d", chan, tgt); break; } lp = &fcp->portdb[tgt]; if (lp->is_target == 0 || lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGWARN, "Chan %d abort of no longer valid target %d", chan, tgt); break; } tmf = (isp24xx_tmf_t *) local; ISP_MEMZERO(tmf, QENTRY_LEN); tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; tmf->tmf_header.rqs_entry_count = 1; tmf->tmf_nphdl = lp->handle; tmf->tmf_delay = 2; tmf->tmf_timeout = 4; tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; tmf->tmf_tidlo = lp->portid; tmf->tmf_tidhi = lp->portid >> 16; tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); isp_put_24xx_tmf(isp, tmf, isp->isp_iocb); MEMORYBARRIER(isp, SYNC_IFORDEV, 0, QENTRY_LEN, chan); fcp->sendmarker = 1; isp_prt(isp, ISP_LOGALL, "Chan %d Reset N-Port Handle 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, MBCMD_DEFAULT_TIMEOUT + tmf->tmf_timeout * 1000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(isp->isp_iocb_dma); mbs.param[3] = DMA_WD0(isp->isp_iocb_dma); mbs.param[6] = DMA_WD3(isp->isp_iocb_dma); mbs.param[7] = 
DMA_WD2(isp->isp_iocb_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) break; MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan); sp = (isp24xx_statusreq_t *) local; isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)isp->isp_iocb)[1], sp); if (sp->req_completion_status == 0) { return (0); } isp_prt(isp, ISP_LOGWARN, "Chan %d reset of target %d returned 0x%x", chan, tgt, sp->req_completion_status); break; } else if (IS_FC(isp)) { if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; mbs.ibits = (1 << 10); } else { mbs.param[1] = (tgt << 8); } } else { mbs.param[1] = (chan << 15) | (tgt << 8); } MBSINIT(&mbs, MBOX_ABORT_TARGET, MBLOGALL, 0); mbs.param[2] = 3; /* 'delay', in seconds */ isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, "Target %d on Bus %d Reset Succeeded", tgt, chan); ISP_SET_SENDMARKER(isp, chan, 1); return (0); case ISPCTL_ABORT_CMD: va_start(ap, ctl); xs = va_arg(ap, XS_T *); va_end(ap); tgt = XS_TGT(xs); chan = XS_CHANNEL(xs); handle = isp_find_handle(isp, xs); if (handle == 0) { isp_prt(isp, ISP_LOGWARN, "cannot find handle for command to abort"); break; } if (IS_24XX(isp)) { isp24xx_abrt_t local, *ab = &local; fcparam *fcp; fcportdb_t *lp; fcp = FCPARAM(isp, chan); if (tgt < 0 || tgt >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "Chan %d trying to abort bad target %d", chan, tgt); break; } lp = &fcp->portdb[tgt]; if (lp->is_target == 0 || lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGWARN, "Chan %d abort of no longer valid target %d", chan, tgt); break; } isp_prt(isp, ISP_LOGALL, "Chan %d Abort Cmd for N-Port 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); ISP_MEMZERO(ab, QENTRY_LEN); ab->abrt_header.rqs_entry_type = RQSTYPE_ABORT_IO; ab->abrt_header.rqs_entry_count = 1; ab->abrt_handle = lp->handle; ab->abrt_cmd_handle = handle; ab->abrt_tidlo = lp->portid; ab->abrt_tidhi = lp->portid >> 16; ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan); isp_put_24xx_abrt(isp, ab, isp->isp_iocb); MEMORYBARRIER(isp, SYNC_IFORDEV, 0, 2 * QENTRY_LEN, chan); ISP_MEMZERO(&mbs, sizeof (mbs)); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(isp->isp_iocb_dma); mbs.param[3] = DMA_WD0(isp->isp_iocb_dma); mbs.param[6] = DMA_WD3(isp->isp_iocb_dma); mbs.param[7] = DMA_WD2(isp->isp_iocb_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) break; MEMORYBARRIER(isp, SYNC_IFORCPU, QENTRY_LEN, QENTRY_LEN, chan); isp_get_24xx_abrt(isp, &((isp24xx_abrt_t *)isp->isp_iocb)[1], ab); if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) { return (0); } isp_prt(isp, ISP_LOGWARN, "Chan %d handle %d abort returned 0x%x", chan, tgt, ab->abrt_nphdl); break; } else if (IS_FC(isp)) { if (ISP_CAP_SCCFW(isp)) { if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; } else { mbs.param[1] = tgt << 8; } mbs.param[6] = XS_LUN(xs); } else { mbs.param[1] = tgt << 8 | XS_LUN(xs); } } else { mbs.param[1] = (chan << 15) | (tgt << 8) | XS_LUN(xs); } MBSINIT(&mbs, MBOX_ABORT, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_ERROR), 0); mbs.param[2] = handle; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } return (0); case ISPCTL_UPDATE_PARAMS: va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); isp_spi_update(isp, chan); return (0); case ISPCTL_FCLINK_TEST: if (IS_FC(isp)) { int usdelay; va_start(ap, ctl); chan = va_arg(ap, int); usdelay = va_arg(ap, int); va_end(ap); if (usdelay == 0) { usdelay = 250000; } return (isp_fclink_test(isp, chan, 
usdelay)); } break; case ISPCTL_SCAN_FABRIC: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_scan_fabric(isp, chan)); } break; case ISPCTL_SCAN_LOOP: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_scan_loop(isp, chan)); } break; case ISPCTL_PDB_SYNC: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_pdb_sync(isp, chan)); } break; case ISPCTL_SEND_LIP: if (IS_FC(isp) && !IS_24XX(isp)) { MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } } break; case ISPCTL_GET_PDB: if (IS_FC(isp)) { isp_pdb_t *pdb; va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); pdb = va_arg(ap, isp_pdb_t *); va_end(ap); return (isp_getpdb(isp, chan, tgt, pdb)); } break; case ISPCTL_GET_NAMES: { uint64_t *wwnn, *wwnp; va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); wwnn = va_arg(ap, uint64_t *); wwnp = va_arg(ap, uint64_t *); va_end(ap); if (wwnn == NULL && wwnp == NULL) { break; } if (wwnn) { *wwnn = isp_get_wwn(isp, chan, tgt, 1); if (*wwnn == INI_NONE) { break; } } if (wwnp) { *wwnp = isp_get_wwn(isp, chan, tgt, 0); if (*wwnp == INI_NONE) { break; } } return (0); } case ISPCTL_RUN_MBOXCMD: { va_start(ap, ctl); mbr = va_arg(ap, mbreg_t *); va_end(ap); isp_mboxcmd(isp, mbr); return (0); } case ISPCTL_PLOGX: { isp_plcmd_t *p; int r; va_start(ap, ctl); p = va_arg(ap, isp_plcmd_t *); va_end(ap); if ((p->flags & PLOGX_FLG_CMD_MASK) != PLOGX_FLG_CMD_PLOGI || (p->handle != NIL_HANDLE)) { return (isp_plogx(isp, p->channel, p->handle, p->portid, p->flags)); } do { isp_next_handle(isp, &p->handle); r = isp_plogx(isp, p->channel, p->handle, p->portid, p->flags); if ((r & 0xffff) == MBOX_PORT_ID_USED) { p->handle = r >> 16; r = 0; break; } } while ((r & 0xffff) == MBOX_LOOP_ID_USED); return (r); } case ISPCTL_CHANGE_ROLE: if (IS_FC(isp)) { int role, r; va_start(ap, ctl); chan = va_arg(ap, int); role = va_arg(ap, int); va_end(ap); r = isp_fc_change_role(isp, chan, role); return (r); } break; default: isp_prt(isp, ISP_LOGERR, "Unknown Control Opcode 0x%x", ctl); break; } return (-1); } /* * Interrupt Service Routine(s). * * External (OS) framework has done the appropriate locking, * and the locking will be held throughout this function. */ /* * Limit our stack depth by sticking with the max likely number * of completions on a request queue at any one time. */ #ifndef MAX_REQUESTQ_COMPLETIONS #define MAX_REQUESTQ_COMPLETIONS 32 #endif void isp_intr(ispsoftc_t *isp, uint16_t isr, uint16_t sema, uint16_t info) { XS_T *complist[MAX_REQUESTQ_COMPLETIONS], *xs; uint32_t iptr, optr, junk; int i, nlooked = 0, ndone = 0, continuations_expected = 0; int etype, last_etype = 0; again: /* * Is this a mailbox related interrupt? * The mailbox semaphore will be nonzero if so. */ if (sema) { fmbox: if (info & MBOX_COMMAND_COMPLETE) { isp->isp_intmboxc++; if (isp->isp_mboxbsy) { int obits = isp->isp_obits; isp->isp_mboxtmp[0] = info; for (i = 1; i < ISP_NMBOX(isp); i++) { if ((obits & (1 << i)) == 0) { continue; } isp->isp_mboxtmp[i] = ISP_READ(isp, MBOX_OFF(i)); } if (isp->isp_mbxwrk0) { if (isp_mbox_continue(isp) == 0) { return; } } MBOX_NOTIFY_COMPLETE(isp); } else { isp_prt(isp, ISP_LOGWARN, "mailbox cmd (0x%x) with no waiters", info); } } else { i = IS_FC(isp)? 
isp_parse_async_fc(isp, info) : isp_parse_async(isp, info); if (i < 0) { return; } } if ((IS_FC(isp) && info != ASYNC_RIOZIO_STALL) || isp->isp_state != ISP_RUNSTATE) { goto out; } } /* * We can't be getting this now. */ if (isp->isp_state != ISP_RUNSTATE) { /* * This seems to happen to 23XX and 24XX cards- don't know why. */ if (isp->isp_mboxbsy && isp->isp_lastmbxcmd == MBOX_ABOUT_FIRMWARE) { goto fmbox; } isp_prt(isp, ISP_LOGINFO, "interrupt (ISR=%x SEMA=%x INFO=%x) " "when not ready", isr, sema, info); /* * Thank you very much! *Burrrp*! */ isp->isp_residx = ISP_READ(isp, isp->isp_respinrp); isp->isp_resodx = isp->isp_residx; ISP_WRITE(isp, isp->isp_respoutrp, isp->isp_resodx); if (IS_24XX(isp)) { ISP_DISABLE_INTS(isp); } goto out; } #ifdef ISP_TARGET_MODE /* * Check for ATIO Queue entries. */ if (IS_24XX(isp) && (isr == ISPR2HST_ATIO_UPDATE || isr == ISPR2HST_ATIO_RSPQ_UPDATE || isr == ISPR2HST_ATIO_UPDATE2)) { iptr = ISP_READ(isp, BIU2400_ATIO_RSPINP); optr = isp->isp_atioodx; while (optr != iptr) { uint8_t qe[QENTRY_LEN]; isphdr_t *hp; uint32_t oop; void *addr; oop = optr; MEMORYBARRIER(isp, SYNC_ATIOQ, oop, QENTRY_LEN, -1); addr = ISP_QUEUE_ENTRY(isp->isp_atioq, oop); isp_get_hdr(isp, addr, (isphdr_t *)qe); hp = (isphdr_t *)qe; switch (hp->rqs_entry_type) { case RQSTYPE_NOTIFY: case RQSTYPE_ATIO: (void) isp_target_notify(isp, addr, &oop); break; default: isp_print_qentry(isp, "?ATIOQ entry?", oop, addr); break; } optr = ISP_NXT_QENTRY(oop, RESULT_QUEUE_LEN(isp)); } if (isp->isp_atioodx != optr) { ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, optr); isp->isp_atioodx = optr; } } #endif /* * You *must* read the Response Queue In Pointer * prior to clearing the RISC interrupt. * * Debounce the 2300 if revision less than 2. */ if (IS_2100(isp) || (IS_2300(isp) && isp->isp_revision < 2)) { i = 0; do { iptr = ISP_READ(isp, isp->isp_respinrp); junk = ISP_READ(isp, isp->isp_respinrp); } while (junk != iptr && ++i < 1000); if (iptr != junk) { isp_prt(isp, ISP_LOGWARN, "Response Queue Out Pointer Unstable (%x, %x)", iptr, junk); goto out; } } else { iptr = ISP_READ(isp, isp->isp_respinrp); } optr = isp->isp_resodx; if (optr == iptr && sema == 0) { /* * There are a lot of these- reasons unknown- mostly on * faster Alpha machines. * * I tried delaying after writing HCCR_CMD_CLEAR_RISC_INT to * make sure the old interrupt went away (to avoid 'ringing' * effects), but that didn't stop this from occurring. */ if (IS_24XX(isp)) { junk = 0; } else if (IS_23XX(isp)) { ISP_DELAY(100); iptr = ISP_READ(isp, isp->isp_respinrp); junk = ISP_READ(isp, BIU_R2HSTSLO); } else { junk = ISP_READ(isp, BIU_ISR); } if (optr == iptr) { if (IS_23XX(isp) || IS_24XX(isp)) { ; } else { sema = ISP_READ(isp, BIU_SEMA); info = ISP_READ(isp, OUTMAILBOX0); if ((sema & 0x3) && (info & 0x8000)) { goto again; } } isp->isp_intbogus++; isp_prt(isp, ISP_LOGDEBUG1, "bogus intr- isr %x (%x) iptr %x optr %x", isr, junk, iptr, optr); } } isp->isp_residx = iptr; while (optr != iptr) { uint8_t qe[QENTRY_LEN]; ispstatusreq_t *sp = (ispstatusreq_t *) qe; isphdr_t *hp; int buddaboom, scsi_status, completion_status; int req_status_flags, req_state_flags; uint8_t *snsp, *resp; uint32_t rlen, slen, totslen; long resid; uint16_t oop; hp = (isphdr_t *) ISP_QUEUE_ENTRY(isp->isp_result, optr); oop = optr; optr = ISP_NXT_QENTRY(optr, RESULT_QUEUE_LEN(isp)); nlooked++; read_again: buddaboom = req_status_flags = req_state_flags = 0; resid = 0L; /* * Synchronize our view of this response queue entry. 
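 *
 * (The surrounding walk is plain ring arithmetic: optr is our
 * consumer index, iptr is the chip's producer index, and
 * ISP_NXT_QENTRY() advances an index modulo the queue size-
 * assuming a power-of-two queue length N, something like
 * ((x + 1) & (N - 1)), so with N = 64 entry 63 wraps back to 0.)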
*/ MEMORYBARRIER(isp, SYNC_RESULT, oop, QENTRY_LEN, -1); if (isp->isp_dblev & ISP_LOGDEBUG1) isp_print_qentry(isp, "Response Queue Entry", oop, hp); isp_get_hdr(isp, hp, &sp->req_header); etype = sp->req_header.rqs_entry_type; if (IS_24XX(isp) && etype == RQSTYPE_RESPONSE) { isp24xx_statusreq_t *sp2 = (isp24xx_statusreq_t *)qe; isp_get_24xx_response(isp, (isp24xx_statusreq_t *)hp, sp2); scsi_status = sp2->req_scsi_status; completion_status = sp2->req_completion_status; if ((scsi_status & 0xff) != 0) req_state_flags = RQSF_GOT_STATUS; else req_state_flags = 0; resid = sp2->req_resid; } else if (etype == RQSTYPE_RESPONSE) { isp_get_response(isp, (ispstatusreq_t *) hp, sp); scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } else if (etype == RQSTYPE_RIO1) { isp_rio1_t *rio = (isp_rio1_t *) qe; isp_get_rio1(isp, (isp_rio1_t *) hp, rio); for (i = 0; i < rio->req_header.rqs_seqno; i++) { isp_fastpost_complete(isp, rio->req_handles[i]); } if (isp->isp_fpcchiwater < rio->req_header.rqs_seqno) { isp->isp_fpcchiwater = rio->req_header.rqs_seqno; } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } else if (etype == RQSTYPE_RIO2) { isp_prt(isp, ISP_LOGERR, "dropping RIO2 response"); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } else if (etype == RQSTYPE_STATUS_CONT) { isp_get_cont_response(isp, (ispstatus_cont_t *) hp, (ispstatus_cont_t *) sp); if (last_etype == RQSTYPE_RESPONSE && continuations_expected && ndone > 0 && (xs = complist[ndone-1]) != NULL) { ispstatus_cont_t *scp = (ispstatus_cont_t *) sp; XS_SENSE_APPEND(xs, scp->req_sense_data, sizeof (scp->req_sense_data)); isp_prt(isp, ISP_LOGDEBUG0|ISP_LOG_CWARN, "%d more Status Continuations expected", --continuations_expected); } else { isp_prt(isp, ISP_LOG_WARN1, "Ignored Continuation Response"); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } else { /* * Somebody reachable via isp_handle_other_response * may have updated the response queue pointers for * us, so we reload our goal index. */ int r; uint32_t tsto = oop; r = isp_handle_other_response(isp, etype, hp, &tsto); if (r < 0) { goto read_again; } /* * If somebody updated the output pointer, then reset * optr to be one more than the updated amount. */ while (tsto != oop) { optr = ISP_NXT_QENTRY(tsto, RESULT_QUEUE_LEN(isp)); } if (r > 0) { ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } /* * After this point, we'll just look at the header as * we don't know how to deal with the rest of the * response. */ /* * It really has to be a bounced request just copied * from the request queue to the response queue. If * not, something bad has happened. */ if (etype != RQSTYPE_REQUEST) { isp_prt(isp, ISP_LOGERR, notresp, etype, oop, optr, nlooked); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } buddaboom = 1; scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } if (sp->req_header.rqs_flags & RQSFLAG_MASK) { if (sp->req_header.rqs_flags & RQSFLAG_CONTINUATION) { isp_print_qentry(isp, "unexpected continuation segment", oop, hp); last_etype = etype; continue; } if (sp->req_header.rqs_flags & RQSFLAG_FULL) { isp_prt(isp, ISP_LOG_WARN1, "internal queues full"); /* * We'll synthesize a QUEUE FULL message below. 
*/ } if (sp->req_header.rqs_flags & RQSFLAG_BADHEADER) { isp_print_qentry(isp, "bad header flag", oop, hp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADPACKET) { isp_print_qentry(isp, "bad request packet", oop, hp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADCOUNT) { isp_print_qentry(isp, "invalid entry count", oop, hp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADORDER) { isp_print_qentry(isp, "invalid IOCB ordering", oop, hp); last_etype = etype; continue; } } xs = isp_find_xs(isp, sp->req_handle); if (xs == NULL) { uint8_t ts = completion_status & 0xff; /* * Only whine if this isn't the expected fallout of * aborting the command or resetting the target. */ if (etype != RQSTYPE_RESPONSE) { isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (type 0x%x)", sp->req_handle, etype); } else if (ts != RQCS_ABORTED && ts != RQCS_RESET_OCCURRED) { isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (status 0x%x)", sp->req_handle, ts); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } if (req_status_flags & RQSTF_BUS_RESET) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx bus was reset", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BUSRESET); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); } if (buddaboom) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx buddaboom", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); } resp = NULL; rlen = 0; snsp = NULL; totslen = slen = 0; if (IS_24XX(isp) && (scsi_status & (RQCS_RV|RQCS_SV)) != 0) { resp = ((isp24xx_statusreq_t *)sp)->req_rsp_sense; rlen = ((isp24xx_statusreq_t *)sp)->req_response_len; } else if (IS_FC(isp) && (scsi_status & RQCS_RV) != 0) { resp = sp->req_response; rlen = sp->req_response_len; } if (IS_FC(isp) && (scsi_status & RQCS_SV) != 0) { /* * Fibre Channel F/W doesn't say we got status * if there's Sense Data instead. I guess they * think it goes w/o saying. 
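 *
 * Layout note for the 24XX case below: FCP response info bytes and
 * sense bytes share req_rsp_sense, so the sense pointer is advanced
 * past the rlen response bytes and slen is clamped to what is left
 * of that field. Whatever remains (totslen - slen bytes) arrives
 * later in RQSTYPE_STATUS_CONT entries; the expression used further
 * on,
 *
 *	(totslen - slen + QENTRY_LEN - 5) / (QENTRY_LEN - 4)
 *
 * is just ceil((totslen - slen) / (QENTRY_LEN - 4)): with 64-byte
 * queue entries each continuation carries up to 60 sense bytes, so
 * 120 leftover bytes cost two continuation entries.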
*/ req_state_flags |= RQSF_GOT_STATUS|RQSF_GOT_SENSE; if (IS_24XX(isp)) { snsp = ((isp24xx_statusreq_t *)sp)->req_rsp_sense; snsp += rlen; totslen = ((isp24xx_statusreq_t *)sp)->req_sense_len; slen = (sizeof (((isp24xx_statusreq_t *)sp)->req_rsp_sense)) - rlen; if (totslen < slen) slen = totslen; } else { snsp = sp->req_sense_data; totslen = sp->req_sense_len; slen = sizeof (sp->req_sense_data); if (totslen < slen) slen = totslen; } } else if (IS_SCSI(isp) && (req_state_flags & RQSF_GOT_SENSE)) { snsp = sp->req_sense_data; totslen = sp->req_sense_len; slen = sizeof (sp->req_sense_data); if (totslen < slen) slen = totslen; } if (req_state_flags & RQSF_GOT_STATUS) { *XS_STSP(xs) = scsi_status & 0xff; } switch (etype) { case RQSTYPE_RESPONSE: if (resp && rlen >= 4 && resp[FCP_RSPNS_CODE_OFFSET] != 0) { const char *ptr; char lb[64]; const char *rnames[10] = { "Task Management function complete", "FCP_DATA length different than FCP_BURST_LEN", "FCP_CMND fields invalid", "FCP_DATA parameter mismatch with FCP_DATA_RO", "Task Management function rejected", "Task Management function failed", NULL, NULL, "Task Management function succeeded", "Task Management function incorrect logical unit number", }; uint8_t code = resp[FCP_RSPNS_CODE_OFFSET]; if (code >= 10 || rnames[code] == NULL) { ISP_SNPRINTF(lb, sizeof(lb), "Unknown FCP Response Code 0x%x", code); ptr = lb; } else { ptr = rnames[code]; } isp_xs_prt(isp, xs, ISP_LOGWARN, "FCP RESPONSE, LENGTH %u: %s CDB0=0x%02x", rlen, ptr, XS_CDBP(xs)[0] & 0xff); if (code != 0 && code != 8) XS_SETERR(xs, HBA_BOTCH); } if (IS_24XX(isp)) { isp_parse_status_24xx(isp, (isp24xx_statusreq_t *)sp, xs, &resid); } else { isp_parse_status(isp, (void *)sp, xs, &resid); } if ((XS_NOERR(xs) || XS_ERR(xs) == HBA_NOERROR) && (*XS_STSP(xs) == SCSI_BUSY)) { XS_SETERR(xs, HBA_TGTBSY); } if (IS_SCSI(isp)) { XS_SET_RESID(xs, resid); /* * A new synchronous rate was negotiated for * this target. Mark state such that we'll go * look up that which has changed later. */ if (req_status_flags & RQSTF_NEGOTIATION) { int t = XS_TGT(xs); sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[t].dev_refresh = 1; sdp->update = 1; } } else { if (req_status_flags & RQSF_XFER_COMPLETE) { XS_SET_RESID(xs, 0); } else if (scsi_status & RQCS_RESID) { XS_SET_RESID(xs, resid); } else { XS_SET_RESID(xs, 0); } } if (snsp && slen) { if (totslen > slen) { continuations_expected += ((totslen - slen + QENTRY_LEN - 5) / (QENTRY_LEN - 4)); if (ndone > (MAX_REQUESTQ_COMPLETIONS - continuations_expected - 1)) { /* we'll lose some stats, but that's a small price to pay */ for (i = 0; i < ndone; i++) { if (complist[i]) { isp->isp_rsltccmplt++; isp_done(complist[i]); } } ndone = 0; } isp_prt(isp, ISP_LOGDEBUG0|ISP_LOG_CWARN, "Expecting %d more Status Continuations for total sense length of %u", continuations_expected, totslen); } XS_SAVE_SENSE(xs, snsp, totslen, slen); } else if ((req_status_flags & RQSF_GOT_STATUS) && (scsi_status & 0xff) == SCSI_CHECK && IS_FC(isp)) { isp_prt(isp, ISP_LOGWARN, "CHECK CONDITION w/o sense data for CDB=0x%x", XS_CDBP(xs)[0] & 0xff); isp_print_qentry(isp, "CC with no Sense", oop, hp); } isp_prt(isp, ISP_LOGDEBUG2, "asked for %ld got raw resid %ld settled for %ld", (long) XS_XFRLEN(xs), resid, (long) XS_GET_RESID(xs)); break; case RQSTYPE_REQUEST: case RQSTYPE_A64: case RQSTYPE_T2RQS: case RQSTYPE_T3RQS: case RQSTYPE_T7RQS: if (!IS_24XX(isp) && (sp->req_header.rqs_flags & RQSFLAG_FULL)) { /* * Force Queue Full status. 
*/ *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); } else if (XS_NOERR(xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx badness at %s:%u", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs), __func__, __LINE__); XS_SETERR(xs, HBA_BOTCH); } XS_SET_RESID(xs, XS_XFRLEN(xs)); break; default: isp_print_qentry(isp, "Unhandled Response Type", oop, hp); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } break; } /* * Free any DMA resources. As a side effect, this may * also do any cache flushing necessary for data coherence. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, sp->req_handle); } isp_destroy_handle(isp, sp->req_handle); if (isp->isp_nactive > 0) { isp->isp_nactive--; } complist[ndone++] = xs; /* defer completion call until later */ ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; if (ndone == MAX_REQUESTQ_COMPLETIONS) { break; } } /* * If we looked at any commands, then it's valid to find out * what the outpointer is. It also is a trigger to update the * ISP's notion of what we've seen so far. */ if (nlooked) { ISP_WRITE(isp, isp->isp_respoutrp, optr); isp->isp_resodx = optr; if (isp->isp_rscchiwater < ndone) isp->isp_rscchiwater = ndone; } out: if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU_SEMA, 0); } for (i = 0; i < ndone; i++) { xs = complist[i]; if (xs) { if (((isp->isp_dblev & (ISP_LOGDEBUG1|ISP_LOGDEBUG2|ISP_LOGDEBUG3))) || ((isp->isp_dblev & (ISP_LOGDEBUG0|ISP_LOG_CWARN) && ((!XS_NOERR(xs)) || (*XS_STSP(xs) != SCSI_GOOD))))) { isp_prt_endcmd(isp, xs); } isp->isp_rsltccmplt++; isp_done(xs); } } } /* * Support routines. */ void isp_prt_endcmd(ispsoftc_t *isp, XS_T *xs) { char cdbstr[16 * 5 + 1]; int i, lim; lim = XS_CDBLEN(xs) > 16? 16 : XS_CDBLEN(xs); ISP_SNPRINTF(cdbstr, sizeof (cdbstr), "0x%02x ", XS_CDBP(xs)[0]); for (i = 1; i < lim; i++) { ISP_SNPRINTF(cdbstr, sizeof (cdbstr), "%s0x%02x ", cdbstr, XS_CDBP(xs)[i]); } if (XS_SENSE_VALID(xs)) { isp_xs_prt(isp, xs, ISP_LOGALL, "FIN dl%d resid %ld CDB=%s SenseLength=%u/%u KEY/ASC/ASCQ=0x%02x/0x%02x/0x%02x", XS_XFRLEN(xs), (long) XS_GET_RESID(xs), cdbstr, XS_CUR_SNSLEN(xs), XS_TOT_SNSLEN(xs), XS_SNSKEY(xs), XS_SNSASC(xs), XS_SNSASCQ(xs)); } else { isp_xs_prt(isp, xs, ISP_LOGALL, "FIN dl%d resid %ld CDB=%s STS 0x%x XS_ERR=0x%x", XS_XFRLEN(xs), (long) XS_GET_RESID(xs), cdbstr, *XS_STSP(xs), XS_ERR(xs)); } } /* * Parse an ASYNC mailbox complete * * Return non-zero if the event has been acknowledged. */ static int isp_parse_async(ispsoftc_t *isp, uint16_t mbox) { int acked = 0; uint32_t h1 = 0, h2 = 0; uint16_t chan = 0; /* * Pick up the channel, but not if this is a ASYNC_RIO32_2, * where Mailboxes 6/7 have the second handle. */ if (mbox != ASYNC_RIO32_2) { if (IS_DUALBUS(isp)) { chan = ISP_READ(isp, OUTMAILBOX6); } } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_BUS_RESET: ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif isp_async(isp, ISPASYNC_BUS_RESET, chan); break; case ASYNC_SYSTEM_ERROR: isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. 
*/ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ isp_async(isp, ISPASYNC_FW_CRASH); acked = 1; break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: /* * We've just been notified that the Queue has woken up. * We don't need to be chatty about this- just unlatch things * and move on. */ mbox = ISP_READ(isp, isp->isp_rqstoutrp); break; case ASYNC_TIMEOUT_RESET: isp_prt(isp, ISP_LOGWARN, "timeout initiated SCSI bus reset of chan %d", chan); ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif break; case ASYNC_DEVICE_RESET: isp_prt(isp, ISP_LOGINFO, "device reset on chan %d", chan); ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif break; case ASYNC_EXTMSG_UNDERRUN: isp_prt(isp, ISP_LOGWARN, "extended message underrun"); break; case ASYNC_SCAM_INT: isp_prt(isp, ISP_LOGINFO, "SCAM interrupt"); break; case ASYNC_HUNG_SCSI: isp_prt(isp, ISP_LOGERR, "stalled SCSI Bus after DATA Overrun"); /* XXX: Need to issue SCSI reset at this point */ break; case ASYNC_KILLED_BUS: isp_prt(isp, ISP_LOGERR, "SCSI Bus reset after DATA Overrun"); break; case ASYNC_BUS_TRANSIT: mbox = ISP_READ(isp, OUTMAILBOX2); switch (mbox & SXP_PINS_MODE_MASK) { case SXP_PINS_LVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to LVD mode"); SDPARAM(isp, chan)->isp_diffmode = 0; SDPARAM(isp, chan)->isp_ultramode = 0; SDPARAM(isp, chan)->isp_lvdmode = 1; break; case SXP_PINS_HVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Differential mode"); SDPARAM(isp, chan)->isp_diffmode = 1; SDPARAM(isp, chan)->isp_ultramode = 0; SDPARAM(isp, chan)->isp_lvdmode = 0; break; case SXP_PINS_SE_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Single Ended mode"); SDPARAM(isp, chan)->isp_diffmode = 0; SDPARAM(isp, chan)->isp_ultramode = 1; SDPARAM(isp, chan)->isp_lvdmode = 0; break; default: isp_prt(isp, ISP_LOGWARN, "Transition to Unknown Mode 0x%x", mbox); break; } /* * XXX: Set up to renegotiate again! */ /* Can only be for a 1080... 
*/ ISP_SET_SENDMARKER(isp, chan, 1); break; case ASYNC_CMD_CMPLT: case ASYNC_RIO32_1: if (!IS_ULTRA3(isp)) { isp_prt(isp, ISP_LOGERR, "unexpected fast posting completion"); break; } /* FALLTHROUGH */ h1 = (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1); break; case ASYNC_RIO32_2: h1 = (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1); h2 = (ISP_READ(isp, OUTMAILBOX7) << 16) | ISP_READ(isp, OUTMAILBOX6); break; case ASYNC_RIO16_5: case ASYNC_RIO16_4: case ASYNC_RIO16_3: case ASYNC_RIO16_2: case ASYNC_RIO16_1: isp_prt(isp, ISP_LOGERR, "unexpected 16 bit RIO handle"); break; default: isp_prt(isp, ISP_LOGWARN, "%s: unhandled async code 0x%x", __func__, mbox); break; } if (h1 || h2) { isp_prt(isp, ISP_LOGDEBUG3, "fast post/rio completion of 0x%08x", h1); isp_fastpost_complete(isp, h1); if (h2) { isp_prt(isp, ISP_LOGDEBUG3, "fast post/rio completion of 0x%08x", h2); isp_fastpost_complete(isp, h2); if (isp->isp_fpcchiwater < 2) { isp->isp_fpcchiwater = 2; } } else { if (isp->isp_fpcchiwater < 1) { isp->isp_fpcchiwater = 1; } } } else { isp->isp_intoasync++; } return (acked); } static int isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox) { fcparam *fcp; int acked = 0; uint16_t chan; if (IS_DUALBUS(isp)) { chan = ISP_READ(isp, OUTMAILBOX6); } else { chan = 0; } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_SYSTEM_ERROR: isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; FCPARAM(isp, chan)->isp_loopstate = LOOP_NIL; isp_change_fw_state(isp, chan, FW_CONFIG_WAIT); /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. */ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ isp_async(isp, ISPASYNC_FW_CRASH); acked = 1; break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: #ifdef ISP_TARGET_MODE if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "ATIO Queue Transfer Error"); break; } #endif isp_prt(isp, ISP_LOGERR, "%s: unexpected ASYNC_QWAKEUP code", __func__); break; case ASYNC_CMD_CMPLT: isp_fastpost_complete(isp, (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1)); if (isp->isp_fpcchiwater < 1) { isp->isp_fpcchiwater = 1; } break; case ASYNC_RIOZIO_STALL: break; case ASYNC_CTIO_DONE: #ifdef ISP_TARGET_MODE if (isp_target_async(isp, (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1), mbox)) { acked = 1; } else { isp->isp_fphccmplt++; } #else isp_prt(isp, ISP_LOGWARN, "unexpected ASYNC CTIO done"); #endif break; case ASYNC_LIP_ERROR: case ASYNC_LIP_NOS_OLS_RECV: case ASYNC_LIP_OCCURRED: case ASYNC_PTPMODE: /* * These are broadcast events that have to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); int topo = fcp->isp_topo; if (fcp->role == ISP_ROLE_NONE) continue; if (fcp->isp_loopstate > LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; ISP_SET_SENDMARKER(isp, chan, 1); isp_async(isp, ISPASYNC_LIP, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif /* * We've had problems with data corruption occurring on * commands that complete (with no apparent error) after * we receive a LIP. This has been observed mostly on * Local Loop topologies.
To be safe, let's just mark * all active initiator commands as dead. */ if (topo == TOPO_NL_PORT || topo == TOPO_FL_PORT) { int i, j; for (i = j = 0; i < isp->isp_maxcmds; i++) { XS_T *xs; isp_hdl_t *hdp; hdp = &isp->isp_xflist[i]; if (ISP_H2HT(hdp->handle) != ISP_HANDLE_INITIATOR) { continue; } xs = hdp->cmd; if (XS_CHANNEL(xs) != chan) { continue; } j++; isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx bus reset set at %s:%u", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs), __func__, __LINE__); XS_SETERR(xs, HBA_BUSRESET); } if (j) { isp_prt(isp, ISP_LOGERR, lipd, chan, j); } } } break; case ASYNC_LOOP_UP: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; fcp->isp_linkstate = 1; if (fcp->isp_loopstate < LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; ISP_SET_SENDMARKER(isp, chan, 1); isp_async(isp, ISPASYNC_LOOP_UP, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif } break; case ASYNC_LOOP_DOWN: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; ISP_SET_SENDMARKER(isp, chan, 1); fcp->isp_linkstate = 0; fcp->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_LOOP_DOWN, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif } break; case ASYNC_LOOP_RESET: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; ISP_SET_SENDMARKER(isp, chan, 1); if (fcp->isp_loopstate > LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_LOOP_RESET, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif } break; case ASYNC_PDB_CHANGED: { int echan, nphdl, nlstate, reason; if (IS_23XX(isp) || IS_24XX(isp)) { nphdl = ISP_READ(isp, OUTMAILBOX1); nlstate = ISP_READ(isp, OUTMAILBOX2); } else { nphdl = nlstate = 0xffff; } if (IS_24XX(isp)) reason = ISP_READ(isp, OUTMAILBOX3) >> 8; else reason = 0xff; if (ISP_CAP_MULTI_ID(isp)) { chan = ISP_READ(isp, OUTMAILBOX3) & 0xff; if (chan == 0xff || nphdl == NIL_HANDLE) { chan = 0; echan = isp->isp_nchan - 1; } else if (chan >= isp->isp_nchan) { break; } else { echan = chan; } } else { chan = echan = 0; } for (; chan <= echan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; - if (fcp->isp_loopstate > LOOP_LTEST_DONE) + if (fcp->isp_loopstate > LOOP_LTEST_DONE) { + if (nphdl != NIL_HANDLE && + nphdl == fcp->isp_login_hdl && + reason == PDB24XX_AE_OPN_2) + continue; fcp->isp_loopstate = LOOP_LTEST_DONE; - else if (fcp->isp_loopstate < LOOP_HAVE_LINK) + } else if (fcp->isp_loopstate < LOOP_HAVE_LINK) fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_PDB, nphdl, nlstate, reason); } break; } case ASYNC_CHANGE_NOTIFY: { int portid; portid = ((ISP_READ(isp, OUTMAILBOX1) & 0xff) << 16) | ISP_READ(isp, OUTMAILBOX2); if (ISP_CAP_MULTI_ID(isp)) { chan = ISP_READ(isp, OUTMAILBOX3) & 0xff; if (chan >= isp->isp_nchan) break; } else { chan = 0; } fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) break; if (fcp->isp_loopstate > LOOP_LTEST_DONE) fcp->isp_loopstate = LOOP_LTEST_DONE; else if (fcp->isp_loopstate < LOOP_HAVE_LINK) 
fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_SNS, portid); break; } case ASYNC_ERR_LOGGING_DISABLED: isp_prt(isp, ISP_LOGWARN, "Error logging disabled (reason 0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_CONNMODE: /* * This only applies to 2100 and 2200 cards */ if (!IS_2200(isp) && !IS_2100(isp)) { isp_prt(isp, ISP_LOGWARN, "bad card for ASYNC_CONNMODE event"); break; } chan = 0; mbox = ISP_READ(isp, OUTMAILBOX1); switch (mbox) { case ISP_CONN_LOOP: isp_prt(isp, ISP_LOGINFO, "Point-to-Point -> Loop mode"); break; case ISP_CONN_PTP: isp_prt(isp, ISP_LOGINFO, "Loop -> Point-to-Point mode"); break; case ISP_CONN_BADLIP: isp_prt(isp, ISP_LOGWARN, "Point-to-Point -> Loop mode (BAD LIP)"); break; case ISP_CONN_FATAL: isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; isp_prt(isp, ISP_LOGERR, "FATAL CONNECTION ERROR"); isp_async(isp, ISPASYNC_FW_CRASH); return (-1); case ISP_CONN_LOOPBACK: isp_prt(isp, ISP_LOGWARN, "Looped Back in Point-to-Point mode"); break; default: isp_prt(isp, ISP_LOGWARN, "Unknown connection mode (0x%x)", mbox); break; } ISP_SET_SENDMARKER(isp, chan, 1); FCPARAM(isp, chan)->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_OTHER); break; case ASYNC_P2P_INIT_ERR: isp_prt(isp, ISP_LOGWARN, "P2P init error (reason 0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_RCV_ERR: if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGWARN, "Receive Error"); } else { isp_prt(isp, ISP_LOGWARN, "unexpected ASYNC_RCV_ERR"); } break; case ASYNC_RJT_SENT: /* same as ASYNC_QFULL_SENT */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "LS_RJT sent"); break; } else { isp_prt(isp, ISP_LOGTDEBUG0, "QFULL sent"); break; } case ASYNC_FW_RESTART_COMPLETE: isp_prt(isp, ISP_LOGDEBUG0, "FW restart complete"); break; case ASYNC_TEMPERATURE_ALERT: isp_prt(isp, ISP_LOGERR, "Temperature alert (subcode 0x%x)", ISP_READ(isp, OUTMAILBOX1)); break; case ASYNC_AUTOLOAD_FW_COMPLETE: isp_prt(isp, ISP_LOGDEBUG0, "Autoload FW init complete"); break; case ASYNC_AUTOLOAD_FW_FAILURE: isp_prt(isp, ISP_LOGERR, "Autoload FW init failure"); break; default: isp_prt(isp, ISP_LOGWARN, "Unknown Async Code 0x%x", mbox); break; } if (mbox != ASYNC_CTIO_DONE && mbox != ASYNC_CMD_CMPLT) { isp->isp_intoasync++; } return (acked); } /* * Handle other response entries. A pointer to the request queue output * index is here in case we want to eat several entries at once, although * this is not used currently. */ static int isp_handle_other_response(ispsoftc_t *isp, int type, isphdr_t *hp, uint32_t *optrp) { isp_ridacq_t rid; int chan, c; uint32_t hdl; void *ptr; switch (type) { case RQSTYPE_STATUS_CONT: isp_prt(isp, ISP_LOG_WARN1, "Ignored Continuation Response"); return (1); case RQSTYPE_MARKER: isp_prt(isp, ISP_LOG_WARN1, "Marker Response"); return (1); case RQSTYPE_RPT_ID_ACQ: isp_get_ridacq(isp, (isp_ridacq_t *)hp, &rid); if (rid.ridacq_format == 0) { for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; c = (chan == 0) ?
127 : (chan - 1); if (rid.ridacq_map[c / 16] & (1 << (c % 16)) || chan == 0) { fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_OTHER); } else { fcp->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_LOOP_DOWN, chan); } } } else { fcparam *fcp = FCPARAM(isp, rid.ridacq_vp_index); if (rid.ridacq_vp_status == RIDACQ_STS_COMPLETE || rid.ridacq_vp_status == RIDACQ_STS_CHANGED) { fcp->isp_loopstate = LOOP_HAVE_LINK; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, rid.ridacq_vp_index, ISPASYNC_CHANGE_OTHER); } else { fcp->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_LOOP_DOWN, rid.ridacq_vp_index); } } return (1); case RQSTYPE_CT_PASSTHRU: case RQSTYPE_VP_MODIFY: case RQSTYPE_VP_CTRL: case RQSTYPE_LOGIN: ISP_IOXGET_32(isp, (uint32_t *)(hp + 1), hdl); ptr = isp_find_xs(isp, hdl); if (ptr != NULL) { isp_destroy_handle(isp, hdl); memcpy(ptr, hp, QENTRY_LEN); wakeup(ptr); } return (1); case RQSTYPE_ATIO: case RQSTYPE_CTIO: case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: case RQSTYPE_NOTIFY: case RQSTYPE_NOTIFY_ACK: case RQSTYPE_CTIO1: case RQSTYPE_ATIO2: case RQSTYPE_CTIO2: case RQSTYPE_CTIO3: case RQSTYPE_CTIO7: case RQSTYPE_ABTS_RCVD: case RQSTYPE_ABTS_RSP: isp->isp_rsltccmplt++; /* count as a response completion */ #ifdef ISP_TARGET_MODE if (isp_target_notify(isp, (ispstatusreq_t *) hp, optrp)) { return (1); } #endif /* FALLTHROUGH */ case RQSTYPE_REQUEST: default: ISP_DELAY(100); if (type != isp_get_response_type(isp, hp)) { /* * This is questionable- we're just papering over * something we've seen on SMP linux in target * mode- we don't really know what's happening * here that causes us to think we've gotten * an entry, but that either the entry isn't * filled out yet or our CPU read data is stale. */ isp_prt(isp, ISP_LOGINFO, "unstable type in response queue"); return (-1); } isp_prt(isp, ISP_LOGWARN, "Unhandled Response Type 0x%x", isp_get_response_type(isp, hp)); return (0); } } static void isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp) { switch (sp->req_completion_status & 0xff) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_INCOMPLETE: if ((sp->req_state_flags & RQSF_GOT_TARGET) == 0) { isp_xs_prt(isp, xs, ISP_LOG_WARN1, "Selection Timeout @ %s:%d", __func__, __LINE__); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); *rp = XS_XFRLEN(xs); } return; } isp_xs_prt(isp, xs, ISP_LOGERR, "Command Incomplete, state 0x%x", sp->req_state_flags); break; case RQCS_DMA_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "DMA Error"); *rp = XS_XFRLEN(xs); break; case RQCS_TRANSPORT_ERROR: { char buf[172]; ISP_SNPRINTF(buf, sizeof (buf), "states=>"); if (sp->req_state_flags & RQSF_GOT_BUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_BUS", buf); } if (sp->req_state_flags & RQSF_GOT_TARGET) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_TGT", buf); } if (sp->req_state_flags & RQSF_SENT_CDB) { ISP_SNPRINTF(buf, sizeof (buf), "%s SENT_CDB", buf); } if (sp->req_state_flags & RQSF_XFRD_DATA) { ISP_SNPRINTF(buf, sizeof (buf), "%s XFRD_DATA", buf); } if (sp->req_state_flags & RQSF_GOT_STATUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_STS", buf); } if (sp->req_state_flags & RQSF_GOT_SENSE) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_SNS", buf); } if (sp->req_state_flags & RQSF_XFER_COMPLETE) { ISP_SNPRINTF(buf, sizeof (buf), "%s XFR_CMPLT", buf); } ISP_SNPRINTF(buf, sizeof (buf), "%s\nstatus=>", buf); if (sp->req_status_flags & RQSTF_DISCONNECT) { ISP_SNPRINTF(buf, sizeof (buf), "%s Disconnect", buf); } if 
(sp->req_status_flags & RQSTF_SYNCHRONOUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s Sync_xfr", buf); } if (sp->req_status_flags & RQSTF_PARITY_ERROR) { ISP_SNPRINTF(buf, sizeof (buf), "%s Parity", buf); } if (sp->req_status_flags & RQSTF_BUS_RESET) { ISP_SNPRINTF(buf, sizeof (buf), "%s Bus_Reset", buf); } if (sp->req_status_flags & RQSTF_DEVICE_RESET) { ISP_SNPRINTF(buf, sizeof (buf), "%s Device_Reset", buf); } if (sp->req_status_flags & RQSTF_ABORTED) { ISP_SNPRINTF(buf, sizeof (buf), "%s Aborted", buf); } if (sp->req_status_flags & RQSTF_TIMEOUT) { ISP_SNPRINTF(buf, sizeof (buf), "%s Timeout", buf); } if (sp->req_status_flags & RQSTF_NEGOTIATION) { ISP_SNPRINTF(buf, sizeof (buf), "%s Negotiation", buf); } isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error: %s", buf); *rp = XS_XFRLEN(xs); break; } case RQCS_RESET_OCCURRED: { int chan; isp_xs_prt(isp, xs, ISP_LOGWARN, "Bus Reset destroyed command"); for (chan = 0; chan < isp->isp_nchan; chan++) { FCPARAM(isp, chan)->sendmarker = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } *rp = XS_XFRLEN(xs); return; } case RQCS_ABORTED: isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted"); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_xs_prt(isp, xs, ISP_LOGWARN, "Command timed out"); /* * XXX: Check to see if we logged out of the device. */ if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOGERR, "data overrun (%ld)", (long) XS_GET_RESID(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_COMMAND_OVERRUN: isp_xs_prt(isp, xs, ISP_LOGERR, "command overrun"); break; case RQCS_STATUS_OVERRUN: isp_xs_prt(isp, xs, ISP_LOGERR, "status overrun"); break; case RQCS_BAD_MESSAGE: isp_xs_prt(isp, xs, ISP_LOGERR, "msg not COMMAND COMPLETE after status"); break; case RQCS_NO_MESSAGE_OUT: isp_xs_prt(isp, xs, ISP_LOGERR, "No MESSAGE OUT phase after selection"); break; case RQCS_EXT_ID_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "EXTENDED IDENTIFY failed"); break; case RQCS_IDE_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "INITIATOR DETECTED ERROR rejected"); break; case RQCS_ABORT_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "ABORT OPERATION rejected"); break; case RQCS_REJECT_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE REJECT rejected"); break; case RQCS_NOP_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "NOP rejected"); break; case RQCS_PARITY_ERROR_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE PARITY ERROR rejected"); break; case RQCS_DEVICE_RESET_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGWARN, "BUS DEVICE RESET rejected"); break; case RQCS_ID_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "IDENTIFY rejected"); break; case RQCS_UNEXP_BUS_FREE: isp_xs_prt(isp, xs, ISP_LOGERR, "Unexpected Bus Free"); break; case RQCS_DATA_UNDERRUN: { if (IS_FC(isp)) { int ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; if (!ru_marked || sp->req_resid > XS_XFRLEN(xs)) { isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? 
"marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } } XS_SET_RESID(xs, sp->req_resid); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; } case RQCS_XACT_ERR1: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction with disconnect not set"); break; case RQCS_XACT_ERR2: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction to target routine %jx", (uintmax_t)XS_LUN(xs)); break; case RQCS_XACT_ERR3: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued cmd when queueing disabled"); break; case RQCS_BAD_ENTRY: isp_prt(isp, ISP_LOGERR, "Invalid IOCB entry type detected"); break; case RQCS_QUEUE_FULL: isp_xs_prt(isp, xs, ISP_LOG_WARN1, "internal queues full status 0x%x", *XS_STSP(xs)); /* * If QFULL or some other status byte is set, then this * isn't an error, per se. * * Unfortunately, some QLogic f/w writers have, in * some cases, ommitted to *set* status to QFULL. */ #if 0 if (*XS_STSP(xs) != SCSI_GOOD && XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); return; } #endif *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); return; case RQCS_PHASE_SKIPPED: isp_xs_prt(isp, xs, ISP_LOGERR, "SCSI phase skipped"); break; case RQCS_ARQS_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "Auto Request Sense Failed"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ARQFAIL); } return; case RQCS_WIDE_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "Wide Negotiation Failed"); if (IS_SCSI(isp)) { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_WIDE; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; sdp->update = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_SYNCXFER_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "SDTR Message Failed"); if (IS_SCSI(isp)) { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp += XS_CHANNEL(xs); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_SYNC; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; sdp->update = 1; } break; case RQCS_LVD_BUSERR: isp_xs_prt(isp, xs, ISP_LOGERR, "Bad LVD condition"); break; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "port %s for target %d", reason, XS_TGT(xs)); /* * If we're on a local loop, force a LIP (which is overkill) * to force a re-login of this unit. If we're on fabric, * then we'll have to log in again as a matter of course. 
*/ if (FCPARAM(isp, 0)->isp_topo == TOPO_NL_PORT || FCPARAM(isp, 0)->isp_topo == TOPO_FL_PORT) { mbreg_t mbs; MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } isp_mboxcmd_qnw(isp, &mbs, 1); } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; case RQCS_PORT_BUSY: isp_prt(isp, ISP_LOGWARN, "port busy for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x", sp->req_completion_status); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, long *rp) { int ru_marked, sv_marked; int chan = XS_CHANNEL(xs); switch (sp->req_completion_status) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_DMA_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "DMA error"); break; case RQCS_TRANSPORT_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error"); break; case RQCS_RESET_OCCURRED: isp_xs_prt(isp, xs, ISP_LOGWARN, "reset destroyed command"); FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } return; case RQCS_ABORTED: isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted"); FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_xs_prt(isp, xs, ISP_LOGWARN, "Command Timed Out"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOGERR, "Data Overrun"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_24XX_DRE: /* data reassembly error */ isp_prt(isp, ISP_LOGERR, "Chan %d data reassembly error for target %d", chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } *rp = XS_XFRLEN(xs); return; case RQCS_24XX_TABORT: /* aborted by target */ isp_prt(isp, ISP_LOGERR, "Chan %d target %d sent ABTS", chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_DATA_UNDERRUN: ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; /* * We can get an underrun w/o things being marked * if we got a non-zero status. */ sv_marked = (sp->req_scsi_status & (RQCS_SV|RQCS_RV)) != 0; if ((ru_marked == 0 && sv_marked == 0) || (sp->req_resid > XS_XFRLEN(xs))) { isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOG_WARN1, "Data Underrun (%d) for command 0x%x", sp->req_resid, XS_CDBP(xs)[0] & 0xff); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "Chan %d port %s for target %d", chan, reason, XS_TGT(xs)); /* * There is no MBOX_INIT_LIP for the 24XX. 
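* (On the 24XX, port logins are driven by IOCBs rather than * mailbox commands, so there is nothing equivalent to kick here.)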
*/ if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; case RQCS_24XX_ENOMEM: /* f/w resource unavailable */ isp_prt(isp, ISP_LOGWARN, "f/w resource unavailable for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; case RQCS_24XX_TMO: /* task management overrun */ isp_prt(isp, ISP_LOGWARN, "command for target %d overlapped task management for chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x on chan %d", sp->req_completion_status, chan); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_fastpost_complete(ispsoftc_t *isp, uint32_t fph) { XS_T *xs; if (fph == 0) { return; } xs = isp_find_xs(isp, fph); if (xs == NULL) { isp_prt(isp, ISP_LOGWARN, "Command for fast post handle 0x%x not found", fph); return; } isp_destroy_handle(isp, fph); /* * Since we don't have a result queue entry item, * we must believe that SCSI status is zero and * that all data transferred. */ XS_SET_RESID(xs, 0); *XS_STSP(xs) = SCSI_GOOD; if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, fph); } if (isp->isp_nactive) { isp->isp_nactive--; } isp->isp_fphccmplt++; isp_done(xs); } static int isp_mbox_continue(ispsoftc_t *isp) { mbreg_t mbs; uint16_t *ptr; uint32_t offset; switch (isp->isp_lastmbxcmd) { case MBOX_WRITE_RAM_WORD: case MBOX_READ_RAM_WORD: case MBOX_WRITE_RAM_WORD_EXTENDED: case MBOX_READ_RAM_WORD_EXTENDED: break; default: return (1); } if (isp->isp_mboxtmp[0] != MBOX_COMMAND_COMPLETE) { isp->isp_mbxwrk0 = 0; return (-1); } /* * Clear the previous interrupt. */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU_SEMA, 0); } /* * Continue with next word. 
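* Each mailbox completion interrupt moves one more word: * isp_mbxwrk0 counts the words still outstanding and isp_mbxworkp * walks the host-side buffer.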
*/ ISP_MEMZERO(&mbs, sizeof (mbs)); ptr = isp->isp_mbxworkp; switch (isp->isp_lastmbxcmd) { case MBOX_WRITE_RAM_WORD: mbs.param[1] = isp->isp_mbxwrk1++; mbs.param[2] = *ptr++; break; case MBOX_READ_RAM_WORD: *ptr++ = isp->isp_mboxtmp[2]; mbs.param[1] = isp->isp_mbxwrk1++; break; case MBOX_WRITE_RAM_WORD_EXTENDED: if (IS_24XX(isp)) { uint32_t *lptr = (uint32_t *)ptr; mbs.param[2] = lptr[0]; mbs.param[3] = lptr[0] >> 16; lptr++; ptr = (uint16_t *)lptr; } else { mbs.param[2] = *ptr++; } offset = isp->isp_mbxwrk1; offset |= isp->isp_mbxwrk8 << 16; mbs.param[1] = offset; mbs.param[8] = offset >> 16; offset++; isp->isp_mbxwrk1 = offset; isp->isp_mbxwrk8 = offset >> 16; break; case MBOX_READ_RAM_WORD_EXTENDED: if (IS_24XX(isp)) { uint32_t *lptr = (uint32_t *)ptr; uint32_t val = isp->isp_mboxtmp[2]; val |= (isp->isp_mboxtmp[3]) << 16; *lptr++ = val; ptr = (uint16_t *)lptr; } else { *ptr++ = isp->isp_mboxtmp[2]; } offset = isp->isp_mbxwrk1; offset |= isp->isp_mbxwrk8 << 16; mbs.param[1] = offset; mbs.param[8] = offset >> 16; offset++; isp->isp_mbxwrk1 = offset; isp->isp_mbxwrk8 = offset >> 16; break; } isp->isp_mbxworkp = ptr; isp->isp_mbxwrk0--; mbs.param[0] = isp->isp_lastmbxcmd; mbs.logval = MBLOGALL; isp_mboxcmd_qnw(isp, &mbs, 0); return (0); } #define ISP_SCSI_IBITS(op) (mbpscsi[((op)<<1)]) #define ISP_SCSI_OBITS(op) (mbpscsi[((op)<<1) + 1]) #define ISP_SCSI_OPMAP(in, out) in, out static const uint8_t mbpscsi[] = { ISP_SCSI_OPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISP_SCSI_OPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISP_SCSI_OPMAP(0x03, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISP_SCSI_OPMAP(0x1f, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISP_SCSI_OPMAP(0x3f, 0x3f), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISP_SCSI_OPMAP(0x01, 0x0f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x09: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0a: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0d: */ ISP_SCSI_OPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0f: */ ISP_SCSI_OPMAP(0x1f, 0x1f), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISP_SCSI_OPMAP(0x3f, 0x3f), /* 0x11: MBOX_INIT_RES_QUEUE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x12: MBOX_EXECUTE_IOCB */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISP_SCSI_OPMAP(0x01, 0x3f), /* 0x14: MBOX_STOP_FIRMWARE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x15: MBOX_ABORT */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x16: MBOX_ABORT_DEVICE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x17: MBOX_ABORT_TARGET */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x18: MBOX_BUS_RESET */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x19: MBOX_STOP_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1a: MBOX_START_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1c: MBOX_ABORT_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x4f), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x1e: */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x20: MBOX_GET_INIT_SCSI_ID */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x21: MBOX_GET_SELECT_TIMEOUT */ ISP_SCSI_OPMAP(0x01, 0xc7), /* 0x22: MBOX_GET_RETRY_COUNT */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x23: MBOX_GET_TAG_AGE_LIMIT */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x24: MBOX_GET_CLOCK_RATE */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x25: MBOX_GET_ACT_NEG_STATE */ 
ISP_SCSI_OPMAP(0x01, 0x07), /* 0x26: MBOX_GET_ASYNC_DATA_SETUP_TIME */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x27: MBOX_GET_PCI_PARAMS */ ISP_SCSI_OPMAP(0x03, 0x4f), /* 0x28: MBOX_GET_TARGET_PARAMS */ ISP_SCSI_OPMAP(0x03, 0x0f), /* 0x29: MBOX_GET_DEV_QUEUE_PARAMS */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x2a: MBOX_GET_RESET_DELAY_PARAMS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2f: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x30: MBOX_SET_INIT_SCSI_ID */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x31: MBOX_SET_SELECT_TIMEOUT */ ISP_SCSI_OPMAP(0xc7, 0xc7), /* 0x32: MBOX_SET_RETRY_COUNT */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x33: MBOX_SET_TAG_AGE_LIMIT */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x34: MBOX_SET_CLOCK_RATE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x35: MBOX_SET_ACT_NEG_STATE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x36: MBOX_SET_ASYNC_DATA_SETUP_TIME */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x37: MBOX_SET_PCI_CONTROL_PARAMS */ ISP_SCSI_OPMAP(0x4f, 0x4f), /* 0x38: MBOX_SET_TARGET_PARAMS */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x39: MBOX_SET_DEV_QUEUE_PARAMS */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x3a: MBOX_SET_RESET_DELAY_PARAMS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3f: */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x40: MBOX_RETURN_BIOS_BLOCK_ADDR */ ISP_SCSI_OPMAP(0x3f, 0x01), /* 0x41: MBOX_WRITE_FOUR_RAM_WORDS */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x42: MBOX_EXEC_BIOS_IOCB */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x43: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x44: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x45: SET SYSTEM PARAMETER */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x46: GET SYSTEM PARAMETER */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x47: */ ISP_SCSI_OPMAP(0x01, 0xcf), /* 0x48: GET SCAM CONFIGURATION */ ISP_SCSI_OPMAP(0xcf, 0xcf), /* 0x49: SET SCAM CONFIGURATION */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x4a: MBOX_SET_FIRMWARE_FEATURES */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x4b: MBOX_GET_FIRMWARE_FEATURES */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4f: */ ISP_SCSI_OPMAP(0xdf, 0xdf), /* 0x50: LOAD RAM A64 */ ISP_SCSI_OPMAP(0xdf, 0xdf), /* 0x51: DUMP RAM A64 */ ISP_SCSI_OPMAP(0xdf, 0xff), /* 0x52: INITIALIZE REQUEST QUEUE A64 */ ISP_SCSI_OPMAP(0xef, 0xff), /* 0x53: INITIALIZE RESPONSE QUEUE A64 */ ISP_SCSI_OPMAP(0xcf, 0x01), /* 0x54: EXECUTE COMMAND IOCB A64 */ ISP_SCSI_OPMAP(0x07, 0x01), /* 0x55: ENABLE TARGET MODE */ ISP_SCSI_OPMAP(0x03, 0x0f), /* 0x56: GET TARGET STATUS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x57: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x58: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x59: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x5a: SET DATA OVERRUN RECOVERY MODE */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x5b: GET DATA OVERRUN RECOVERY MODE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x5c: SET HOST DATA */ ISP_SCSI_OPMAP(0x01, 0x01) /* 0x5d: GET HOST DATA */ }; #define MAX_SCSI_OPCODE 0x5d static const char *scsi_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", NULL, NULL, NULL, NULL, NULL, "CHECK FIRMWARE", NULL, "INIT REQUEST QUEUE", "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE",
"ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET INIT SCSI ID", "GET SELECT TIMEOUT", "GET RETRY COUNT", "GET TAG AGE LIMIT", "GET CLOCK RATE", "GET ACT NEG STATE", "GET ASYNC DATA SETUP TIME", "GET PCI PARAMS", "GET TARGET PARAMS", "GET DEV QUEUE PARAMS", "GET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "SET INIT SCSI ID", "SET SELECT TIMEOUT", "SET RETRY COUNT", "SET TAG AGE LIMIT", "SET CLOCK RATE", "SET ACT NEG STATE", "SET ASYNC DATA SETUP TIME", "SET PCI CONTROL PARAMS", "SET TARGET PARAMS", "SET DEV QUEUE PARAMS", "SET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "RETURN BIOS BLOCK ADDR", "WRITE FOUR RAM WORDS", "EXEC BIOS IOCB", NULL, NULL, "SET SYSTEM PARAMETER", "GET SYSTEM PARAMETER", NULL, "GET SCAM CONFIGURATION", "SET SCAM CONFIGURATION", "SET FIRMWARE FEATURES", "GET FIRMWARE FEATURES", NULL, NULL, NULL, NULL, "LOAD RAM A64", "DUMP RAM A64", "INITIALIZE REQUEST QUEUE A64", "INITIALIZE RESPONSE QUEUE A64", "EXECUTE IOCB A64", "ENABLE TARGET MODE", "GET TARGET MODE STATE", NULL, NULL, NULL, "SET DATA OVERRUN RECOVERY MODE", "GET DATA OVERRUN RECOVERY MODE", "SET HOST DATA", "GET NOST DATA", }; #define ISP_FC_IBITS(op) ((mbpfc[((op)<<3) + 0] << 24) | (mbpfc[((op)<<3) + 1] << 16) | (mbpfc[((op)<<3) + 2] << 8) | (mbpfc[((op)<<3) + 3])) #define ISP_FC_OBITS(op) ((mbpfc[((op)<<3) + 4] << 24) | (mbpfc[((op)<<3) + 5] << 16) | (mbpfc[((op)<<3) + 6] << 8) | (mbpfc[((op)<<3) + 7])) #define ISP_FC_OPMAP(in0, out0) 0, 0, 0, in0, 0, 0, 0, out0 #define ISP_FC_OPMAP_HALF(in1, in0, out1, out0) 0, 0, in1, in0, 0, 0, out1, out0 #define ISP_FC_OPMAP_FULL(in3, in2, in1, in0, out3, out2, out1, out0) in3, in2, in1, in0, out3, out2, out1, out0 static const uint32_t mbpfc[] = { ISP_FC_OPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISP_FC_OPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISP_FC_OPMAP_HALF(0x07, 0xff, 0x00, 0x03), /* 0x02: MBOX_EXEC_FIRMWARE */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISP_FC_OPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISP_FC_OPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISP_FC_OPMAP_FULL(0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISP_FC_OPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISP_FC_OPMAP_FULL(0x0, 0x0, 0x0, 0x01, 0x0, 0x3, 0x80, 0x7f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x09: MBOX_LOAD_RISC_RAM_2100 */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x0a: DUMP RAM */ ISP_FC_OPMAP_HALF(0x1, 0xff, 0x0, 0x01), /* 0x0b: MBOX_LOAD_RISC_RAM */ ISP_FC_OPMAP(0x00, 0x00), /* 0x0c: */ ISP_FC_OPMAP_HALF(0x1, 0x0f, 0x0, 0x01), /* 0x0d: MBOX_WRITE_RAM_WORD_EXTENDED */ ISP_FC_OPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISP_FC_OPMAP_HALF(0x1, 0x03, 0x0, 0x0d), /* 0x0f: MBOX_READ_RAM_WORD_EXTENDED */ ISP_FC_OPMAP(0x1f, 0x11), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISP_FC_OPMAP(0x2f, 0x21), /* 0x11: MBOX_INIT_RES_QUEUE */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x12: MBOX_EXECUTE_IOCB */ ISP_FC_OPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISP_FC_OPMAP_HALF(0x1, 0xff, 0x0, 0x03), /* 0x14: MBOX_STOP_FIRMWARE */ ISP_FC_OPMAP(0x4f, 0x01), /* 0x15: MBOX_ABORT */ ISP_FC_OPMAP(0x07, 0x01), /* 0x16: MBOX_ABORT_DEVICE */ ISP_FC_OPMAP(0x07, 0x01), /* 0x17: MBOX_ABORT_TARGET */ ISP_FC_OPMAP(0x03, 0x03), /* 0x18: MBOX_BUS_RESET */ ISP_FC_OPMAP(0x07, 0x05), /* 0x19: MBOX_STOP_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1a: MBOX_START_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1c: MBOX_ABORT_QUEUE */ ISP_FC_OPMAP(0x07, 0x03), /* 0x1d: 
MBOX_GET_DEV_QUEUE_STATUS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x1e: */ ISP_FC_OPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISP_FC_OPMAP_HALF(0x2, 0x01, 0x7e, 0xcf), /* 0x20: MBOX_GET_LOOP_ID */ ISP_FC_OPMAP(0x00, 0x00), /* 0x21: */ ISP_FC_OPMAP(0x03, 0x4b), /* 0x22: MBOX_GET_TIMEOUT_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x23: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x24: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x25: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x26: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x27: */ ISP_FC_OPMAP(0x01, 0x03), /* 0x28: MBOX_GET_FIRMWARE_OPTIONS */ ISP_FC_OPMAP(0x03, 0x07), /* 0x29: MBOX_GET_PORT_QUEUE_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2f: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x30: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x31: */ ISP_FC_OPMAP(0x4b, 0x4b), /* 0x32: MBOX_SET_TIMEOUT_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x33: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x34: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x35: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x36: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x37: */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x38: MBOX_SET_FIRMWARE_OPTIONS */ ISP_FC_OPMAP(0x0f, 0x07), /* 0x39: MBOX_SET_PORT_QUEUE_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3f: */ ISP_FC_OPMAP(0x03, 0x01), /* 0x40: MBOX_LOOP_PORT_BYPASS */ ISP_FC_OPMAP(0x03, 0x01), /* 0x41: MBOX_LOOP_PORT_ENABLE */ ISP_FC_OPMAP_HALF(0x0, 0x01, 0x3, 0xcf), /* 0x42: MBOX_GET_RESOURCE_COUNT */ ISP_FC_OPMAP(0x01, 0x01), /* 0x43: MBOX_REQUEST_OFFLINE_MODE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x44: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x45: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x46: */ ISP_FC_OPMAP(0xcf, 0x03), /* 0x47: GET PORT_DATABASE ENHANCED */ ISP_FC_OPMAP(0xcf, 0x0f), /* 0x48: MBOX_INIT_FIRMWARE_MULTI_ID */ ISP_FC_OPMAP(0xcd, 0x01), /* 0x49: MBOX_GET_VP_DATABASE */ ISP_FC_OPMAP_HALF(0x2, 0xcd, 0x0, 0x01), /* 0x4a: MBOX_GET_VP_DATABASE_ENTRY */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4f: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x50: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x51: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x52: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x53: */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x54: EXECUTE IOCB A64 */ ISP_FC_OPMAP(0x00, 0x00), /* 0x55: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x56: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x57: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x58: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x59: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5a: */ ISP_FC_OPMAP(0x03, 0x01), /* 0x5b: MBOX_DRIVER_HEARTBEAT */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x5c: MBOX_FW_HEARTBEAT */ ISP_FC_OPMAP(0x07, 0x1f), /* 0x5d: MBOX_GET_SET_DATA_RATE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5f: */ ISP_FC_OPMAP(0xcf, 0x0f), /* 0x60: MBOX_INIT_FIRMWARE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x61: */ ISP_FC_OPMAP(0x01, 0x01), /* 0x62: MBOX_INIT_LIP */ ISP_FC_OPMAP(0xcd, 0x03), /* 0x63: MBOX_GET_FC_AL_POSITION_MAP */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x64: MBOX_GET_PORT_DB */ ISP_FC_OPMAP(0x07, 0x01), /* 0x65: MBOX_CLEAR_ACA */ ISP_FC_OPMAP(0x07, 0x01), /* 0x66: MBOX_TARGET_RESET */ ISP_FC_OPMAP(0x07, 0x01), /* 0x67: MBOX_CLEAR_TASK_SET */ ISP_FC_OPMAP(0x07, 0x01), /* 0x68: MBOX_ABORT_TASK_SET */ 
ISP_FC_OPMAP(0x01, 0x07), /* 0x69: MBOX_GET_FW_STATE */ ISP_FC_OPMAP_HALF(0x6, 0x03, 0x0, 0xcf), /* 0x6a: MBOX_GET_PORT_NAME */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x6b: MBOX_GET_LINK_STATUS */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x6c: MBOX_INIT_LIP_RESET */ ISP_FC_OPMAP(0x00, 0x00), /* 0x6d: */ ISP_FC_OPMAP(0xcf, 0x03), /* 0x6e: MBOX_SEND_SNS */ ISP_FC_OPMAP(0x0f, 0x07), /* 0x6f: MBOX_FABRIC_LOGIN */ ISP_FC_OPMAP_HALF(0x02, 0x03, 0x00, 0x03), /* 0x70: MBOX_SEND_CHANGE_REQUEST */ ISP_FC_OPMAP(0x03, 0x03), /* 0x71: MBOX_FABRIC_LOGOUT */ ISP_FC_OPMAP(0x0f, 0x0f), /* 0x72: MBOX_INIT_LIP_LOGIN */ ISP_FC_OPMAP(0x00, 0x00), /* 0x73: */ ISP_FC_OPMAP(0x07, 0x01), /* 0x74: LOGIN LOOP PORT */ ISP_FC_OPMAP_HALF(0x03, 0xcf, 0x00, 0x07), /* 0x75: GET PORT/NODE NAME LIST */ ISP_FC_OPMAP(0x4f, 0x01), /* 0x76: SET VENDOR ID */ ISP_FC_OPMAP(0xcd, 0x01), /* 0x77: INITIALIZE IP MAILBOX */ ISP_FC_OPMAP(0x00, 0x00), /* 0x78: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x79: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x7a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x7b: */ ISP_FC_OPMAP_HALF(0x03, 0x4f, 0x00, 0x07), /* 0x7c: Get ID List */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x7d: SEND LFA */ ISP_FC_OPMAP(0x0f, 0x01) /* 0x7e: LUN RESET */ }; #define MAX_FC_OPCODE 0x7e /* * Footnotes * * (1): this sets bits 21..16 in mailbox register #8, which we nominally * do not access at this time in the core driver. The caller is * responsible for setting this register first (Gross!). The assumption * is that we won't overflow. */ static const char *fc_mbcmd_names[] = { "NO-OP", /* 00h */ "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", "LOAD RAM (2100)", "DUMP RAM", "LOAD RISC RAM", "DUMP RISC RAM", "WRITE RAM WORD EXTENDED", "CHECK FIRMWARE", "READ RAM WORD EXTENDED", "INIT REQUEST QUEUE", /* 10h */ "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET LOOP ID", /* 20h */ NULL, "GET TIMEOUT PARAMS", NULL, NULL, NULL, NULL, NULL, "GET FIRMWARE OPTIONS", "GET PORT QUEUE PARAMS", "GENERATE SYSTEM ERROR", NULL, NULL, NULL, NULL, NULL, "WRITE SFP", /* 30h */ "READ SFP", "SET TIMEOUT PARAMS", NULL, NULL, NULL, NULL, NULL, "SET FIRMWARE OPTIONS", "SET PORT QUEUE PARAMS", NULL, "SET FC LED CONF", NULL, "RESTART NIC FIRMWARE", "ACCESS CONTROL", NULL, "LOOP PORT BYPASS", /* 40h */ "LOOP PORT ENABLE", "GET RESOURCE COUNT", "REQUEST NON PARTICIPATING MODE", "DIAGNOSTIC ECHO TEST", "DIAGNOSTIC LOOPBACK", NULL, "GET PORT DATABASE ENHANCED", "INIT FIRMWARE MULTI ID", "GET VP DATABASE", "GET VP DATABASE ENTRY", NULL, NULL, NULL, NULL, NULL, "GET FCF LIST", /* 50h */ "GET DCBX PARAMETERS", NULL, "HOST MEMORY COPY", "EXECUTE IOCB A64", NULL, NULL, "SEND RNID", NULL, "SET PARAMETERS", "GET PARAMETERS", "DRIVER HEARTBEAT", "FIRMWARE HEARTBEAT", "GET/SET DATA RATE", "SEND RNFT", NULL, "INIT FIRMWARE", /* 60h */ "GET INIT CONTROL BLOCK", "INIT LIP", "GET FC-AL POSITION MAP", "GET PORT DATABASE", "CLEAR ACA", "TARGET RESET", "CLEAR TASK SET", "ABORT TASK SET", "GET FW STATE", "GET PORT NAME", "GET LINK STATUS", "INIT LIP RESET", "GET LINK STATS & PRIVATE DATA CNTS", "SEND SNS", "FABRIC LOGIN", "SEND CHANGE REQUEST", /* 70h */ "FABRIC LOGOUT", "INIT LIP LOGIN", NULL, "LOGIN LOOP PORT", "GET PORT/NODE NAME LIST", "SET VENDOR ID", "INITIALIZE IP MAILBOX", NULL, NULL, "GET XGMAC STATS", NULL, "GET ID LIST", "SEND LFA", "LUN RESET" }; 
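/* * Worked example of the opmap encoding above (a reading aid, not used * by the code): MBOX_BUS_RESET (0x18) is declared ISP_FC_OPMAP(0x03, * 0x03), so ISP_FC_IBITS(0x18) == 0x03, i.e. mailbox registers 0 and 1 * carry input parameters, and ISP_FC_OBITS(0x18) == 0x03, i.e. * registers 0 and 1 are valid on completion. isp_mboxcmd() and * isp_mboxcmd_qnw() below walk these bitmaps with (1 << box) to decide * which registers to write before the command and which to copy back * afterwards. */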
static void isp_mboxcmd_qnw(ispsoftc_t *isp, mbreg_t *mbp, int nodelay) { unsigned int ibits, obits, box, opcode; opcode = mbp->param[0]; if (IS_FC(isp)) { ibits = ISP_FC_IBITS(opcode); obits = ISP_FC_OBITS(opcode); } else { ibits = ISP_SCSI_IBITS(opcode); obits = ISP_SCSI_OBITS(opcode); } ibits |= mbp->ibits; obits |= mbp->obits; for (box = 0; box < ISP_NMBOX(isp); box++) { if (ibits & (1 << box)) { ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } if (nodelay == 0) { isp->isp_mboxtmp[box] = mbp->param[box] = 0; } } if (nodelay == 0) { isp->isp_lastmbxcmd = opcode; isp->isp_obits = obits; isp->isp_mboxbsy = 1; } if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * Oddly enough, if we're not delaying for an answer, * delay a bit to give the f/w a chance to pick up the * command. */ if (nodelay) { ISP_DELAY(1000); } } static void isp_mboxcmd(ispsoftc_t *isp, mbreg_t *mbp) { const char *cname, *xname, *sname; char tname[16], mname[16]; unsigned int ibits, obits, box, opcode; opcode = mbp->param[0]; if (IS_FC(isp)) { if (opcode > MAX_FC_OPCODE) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } cname = fc_mbcmd_names[opcode]; ibits = ISP_FC_IBITS(opcode); obits = ISP_FC_OBITS(opcode); } else { if (opcode > MAX_SCSI_OPCODE) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } cname = scsi_mbcmd_names[opcode]; ibits = ISP_SCSI_IBITS(opcode); obits = ISP_SCSI_OBITS(opcode); } if (cname == NULL) { cname = tname; ISP_SNPRINTF(tname, sizeof tname, "opcode %x", opcode); } isp_prt(isp, ISP_LOGDEBUG3, "Mailbox Command '%s'", cname); /* * Pick up any additional bits that the caller might have set. */ ibits |= mbp->ibits; obits |= mbp->obits; /* * Mask any bits that the caller wants us to mask */ ibits &= mbp->ibitm; obits &= mbp->obitm; if (ibits == 0 && obits == 0) { mbp->param[0] = MBOX_COMMAND_PARAM_ERROR; isp_prt(isp, ISP_LOGERR, "no parameters for 0x%x", opcode); return; } /* * Get exclusive usage of mailbox registers. */ if (MBOX_ACQUIRE(isp)) { mbp->param[0] = MBOX_REGS_BUSY; goto out; } for (box = 0; box < ISP_NMBOX(isp); box++) { if (ibits & (1 << box)) { isp_prt(isp, ISP_LOGDEBUG3, "IN mbox %d = 0x%04x", box, mbp->param[box]); ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } isp->isp_mboxtmp[box] = mbp->param[box] = 0; } isp->isp_lastmbxcmd = opcode; /* * We assume that we can't overwrite a previous command. */ isp->isp_obits = obits; isp->isp_mboxbsy = 1; /* * Set Host Interrupt condition so that RISC will pick up mailbox regs. */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * While we haven't finished the command, spin our wheels here. */ MBOX_WAIT_COMPLETE(isp, mbp); /* * Did the command time out? */ if (mbp->param[0] == MBOX_TIMEOUT) { isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); goto out; } /* * Copy back output registers. 
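* MBOX_WAIT_COMPLETE() returned with the completion values staged * in isp_mboxtmp[]; only the registers named in this opcode's obits * map are meaningful.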
*/ for (box = 0; box < ISP_NMBOX(isp); box++) { if (obits & (1 << box)) { mbp->param[box] = isp->isp_mboxtmp[box]; isp_prt(isp, ISP_LOGDEBUG3, "OUT mbox %d = 0x%04x", box, mbp->param[box]); } } isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); out: if (mbp->logval == 0 || mbp->param[0] == MBOX_COMMAND_COMPLETE) return; if ((mbp->param[0] & 0xbfe0) == 0 && (mbp->logval & MBLOGMASK(mbp->param[0])) == 0) return; xname = NULL; sname = ""; switch (mbp->param[0]) { case MBOX_INVALID_COMMAND: xname = "INVALID COMMAND"; break; case MBOX_HOST_INTERFACE_ERROR: xname = "HOST INTERFACE ERROR"; break; case MBOX_TEST_FAILED: xname = "TEST FAILED"; break; case MBOX_COMMAND_ERROR: xname = "COMMAND ERROR"; ISP_SNPRINTF(mname, sizeof(mname), " subcode 0x%x", mbp->param[1]); sname = mname; break; case MBOX_COMMAND_PARAM_ERROR: xname = "COMMAND PARAMETER ERROR"; break; case MBOX_PORT_ID_USED: xname = "PORT ID ALREADY IN USE"; break; case MBOX_LOOP_ID_USED: xname = "LOOP ID ALREADY IN USE"; break; case MBOX_ALL_IDS_USED: xname = "ALL LOOP IDS IN USE"; break; case MBOX_NOT_LOGGED_IN: xname = "NOT LOGGED IN"; break; case MBOX_LINK_DOWN_ERROR: xname = "LINK DOWN ERROR"; break; case MBOX_LOOPBACK_ERROR: xname = "LOOPBACK ERROR"; break; case MBOX_CHECKSUM_ERROR: xname = "CHECKSUM ERROR"; break; case MBOX_INVALID_PRODUCT_KEY: xname = "INVALID PRODUCT KEY"; break; case MBOX_REGS_BUSY: xname = "REGISTERS BUSY"; break; case MBOX_TIMEOUT: xname = "TIMEOUT"; break; default: ISP_SNPRINTF(mname, sizeof mname, "error 0x%x", mbp->param[0]); xname = mname; break; } if (xname) { isp_prt(isp, ISP_LOGALL, "Mailbox Command '%s' failed (%s%s)", cname, xname, sname); } } static int isp_fw_state(ispsoftc_t *isp, int chan) { if (IS_FC(isp)) { mbreg_t mbs; MBSINIT(&mbs, MBOX_GET_FW_STATE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (mbs.param[1]); } } return (FW_ERROR); } static void isp_spi_update(ispsoftc_t *isp, int chan) { int tgt; mbreg_t mbs; sdparam *sdp; if (IS_FC(isp)) { /* * There are no 'per-bus' settings for Fibre Channel. */ return; } sdp = SDPARAM(isp, chan); sdp->update = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint16_t flags, period, offset; int get; if (sdp->isp_devparam[tgt].dev_enable == 0) { sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 0; isp_prt(isp, ISP_LOGDEBUG0, "skipping target %d bus %d update", tgt, chan); continue; } /* * If the goal is to update the status of the device, * take what's in goal_flags and try and set the device * toward that. Otherwise, if we're just refreshing the * current device state, get the current parameters. */ MBSINIT(&mbs, 0, MBLOGALL, 0); /* * Refresh overrides set */ if (sdp->isp_devparam[tgt].dev_refresh) { mbs.param[0] = MBOX_GET_TARGET_PARAMS; get = 1; } else if (sdp->isp_devparam[tgt].dev_update) { mbs.param[0] = MBOX_SET_TARGET_PARAMS; /* * Make sure goal_flags has "Renegotiate on Error" * on and "Freeze Queue on Error" off. */ sdp->isp_devparam[tgt].goal_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].goal_flags &= ~DPARM_QFRZ; mbs.param[2] = sdp->isp_devparam[tgt].goal_flags; /* * Insist that PARITY must be enabled * if SYNC or WIDE is enabled. */ if ((mbs.param[2] & (DPARM_SYNC|DPARM_WIDE)) != 0) { mbs.param[2] |= DPARM_PARITY; } if (mbs.param[2] & DPARM_SYNC) { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } /* * A command completion later that has * RQSTF_NEGOTIATION set can cause * the dev_refresh/announce cycle also. 
* * Note: It is really important to update our current * flags with at least the state of TAG capabilities- * otherwise we might try and send a tagged command * when we have it all turned off. So change it here * to say that current already matches goal. */ sdp->isp_devparam[tgt].actv_flags &= ~DPARM_TQING; sdp->isp_devparam[tgt].actv_flags |= (sdp->isp_devparam[tgt].goal_flags & DPARM_TQING); isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x", chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); get = 0; } else { continue; } mbs.param[1] = (chan << 15) | (tgt << 8); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } if (get == 0) { sdp->sendmarker = 1; sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 1; } else { sdp->isp_devparam[tgt].dev_refresh = 0; flags = mbs.param[2]; period = mbs.param[3] & 0xff; offset = mbs.param[3] >> 8; sdp->isp_devparam[tgt].actv_flags = flags; sdp->isp_devparam[tgt].actv_period = period; sdp->isp_devparam[tgt].actv_offset = offset; isp_async(isp, ISPASYNC_NEW_TGT_PARAMS, chan, tgt); } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_update || sdp->isp_devparam[tgt].dev_refresh) { sdp->update = 1; break; } } } static void isp_setdfltsdparm(ispsoftc_t *isp) { int tgt; sdparam *sdp, *sdp1; sdp = SDPARAM(isp, 0); if (IS_DUALBUS(isp)) sdp1 = sdp + 1; else sdp1 = NULL; /* * Establish some default parameters. */ sdp->isp_cmd_dma_burst_enable = 0; sdp->isp_data_dma_burst_enabl = 1; sdp->isp_fifo_threshold = 0; sdp->isp_initiator_id = DEFAULT_IID(isp, 0); if (isp->isp_type >= ISP_HA_SCSI_1040) { sdp->isp_async_data_setup = 9; } else { sdp->isp_async_data_setup = 6; } sdp->isp_selection_timeout = 250; sdp->isp_max_queue_depth = MAXISPREQUEST(isp); sdp->isp_tag_aging = 8; sdp->isp_bus_reset_delay = 5; /* * Don't retry selection, busy or queue full automatically- reflect * these back to us. */ sdp->isp_retry_count = 0; sdp->isp_retry_delay = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].exc_throttle = ISP_EXEC_THROTTLE; sdp->isp_devparam[tgt].dev_enable = 1; } /* * The trick here is to establish a default for the default (honk!) * state (goal_flags). Then try and get the current status from * the card to fill in the current state. We don't, in fact, set * the default to the SAFE default state- that's not the goal state. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint8_t off, per; sdp->isp_devparam[tgt].actv_offset = 0; sdp->isp_devparam[tgt].actv_period = 0; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags = DPARM_DEFAULT; /* * We default to Wide/Fast for versions less than a 1040 * (unless it's SBus). 
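* Each ISP_xxM_SYNCPARMS constant packs the negotiation goal as * (offset << 8) | period_factor, which is what the decode below * pulls back apart.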
*/ if (IS_ULTRA3(isp)) { off = ISP_80M_SYNCPARMS >> 8; per = ISP_80M_SYNCPARMS & 0xff; } else if (IS_ULTRA2(isp)) { off = ISP_40M_SYNCPARMS >> 8; per = ISP_40M_SYNCPARMS & 0xff; } else if (IS_1240(isp)) { off = ISP_20M_SYNCPARMS >> 8; per = ISP_20M_SYNCPARMS & 0xff; } else if ((isp->isp_bustype == ISP_BT_SBUS && isp->isp_type < ISP_HA_SCSI_1020A) || (isp->isp_bustype == ISP_BT_PCI && isp->isp_type < ISP_HA_SCSI_1040) || (isp->isp_clock && isp->isp_clock < 60) || (sdp->isp_ultramode == 0)) { off = ISP_10M_SYNCPARMS >> 8; per = ISP_10M_SYNCPARMS & 0xff; } else { off = ISP_20M_SYNCPARMS_1040 >> 8; per = ISP_20M_SYNCPARMS_1040 & 0xff; } sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset = off; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period = per; } /* * If we're a dual bus card, just copy the data over */ if (sdp1) { *sdp1 = *sdp; sdp1->isp_initiator_id = DEFAULT_IID(isp, 1); } /* * If we've not been told to avoid reading NVRAM, try and read it. * If we're successful reading it, we can then return because NVRAM * will tell us what the desired settings are. Otherwise, we establish * some reasonable 'fake' nvram and goal defaults. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { mbreg_t mbs; if (isp_read_nvram(isp, 0) == 0) { if (IS_DUALBUS(isp)) { if (isp_read_nvram(isp, 1) == 0) { return; } } } MBSINIT(&mbs, MBOX_GET_ACT_NEG_STATE, MBLOGNONE, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdp->isp_req_ack_active_neg = 1; sdp->isp_data_line_active_neg = 1; if (sdp1) { sdp1->isp_req_ack_active_neg = 1; sdp1->isp_data_line_active_neg = 1; } } else { sdp->isp_req_ack_active_neg = (mbs.param[1] >> 4) & 0x1; sdp->isp_data_line_active_neg = (mbs.param[1] >> 5) & 0x1; if (sdp1) { sdp1->isp_req_ack_active_neg = (mbs.param[2] >> 4) & 0x1; sdp1->isp_data_line_active_neg = (mbs.param[2] >> 5) & 0x1; } } } } static void isp_setdfltfcparm(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); /* * Establish some default parameters. */ fcp->role = DEFAULT_ROLE(isp, chan); fcp->isp_maxalloc = ICB_DFLT_ALLOC; fcp->isp_retry_delay = ICB_DFLT_RDELAY; fcp->isp_retry_count = ICB_DFLT_RCOUNT; fcp->isp_loopid = DEFAULT_LOOPID(isp, chan); fcp->isp_wwnn_nvram = DEFAULT_NODEWWN(isp, chan); fcp->isp_wwpn_nvram = DEFAULT_PORTWWN(isp, chan); fcp->isp_fwoptions = 0; fcp->isp_xfwoptions = 0; fcp->isp_zfwoptions = 0; fcp->isp_lasthdl = NIL_HANDLE; + fcp->isp_login_hdl = NIL_HANDLE; if (IS_24XX(isp)) { fcp->isp_fwoptions |= ICB2400_OPT1_FAIRNESS; fcp->isp_fwoptions |= ICB2400_OPT1_HARD_ADDRESS; - if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { + if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) fcp->isp_fwoptions |= ICB2400_OPT1_FULL_DUPLEX; - } fcp->isp_fwoptions |= ICB2400_OPT1_BOTH_WWNS; + fcp->isp_xfwoptions |= ICB2400_OPT2_LOOP_2_PTP; fcp->isp_zfwoptions |= ICB2400_OPT3_RATE_AUTO; } else { fcp->isp_fwoptions |= ICBOPT_FAIRNESS; fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS; - if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { + if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX; - } /* * Make sure this is turned off now until we get * extended options from NVRAM */ fcp->isp_fwoptions &= ~ICBOPT_EXTENDED; + fcp->isp_xfwoptions |= ICBXOPT_LOOP_2_PTP; fcp->isp_zfwoptions |= ICBZOPT_RATE_AUTO; } /* * Now try and read NVRAM unless told to not do so. * This will set fcparam's isp_wwnn_nvram && isp_wwpn_nvram. 
*/ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { int i, j = 0; /* * Give a couple of tries at reading NVRAM. */ for (i = 0; i < 2; i++) { j = isp_read_nvram(isp, chan); if (j == 0) { break; } } if (j) { isp->isp_confopts |= ISP_CFG_NONVRAM; } } fcp->isp_wwnn = ACTIVE_NODEWWN(isp, chan); fcp->isp_wwpn = ACTIVE_PORTWWN(isp, chan); isp_prt(isp, ISP_LOGCONFIG, "Chan %d 0x%08x%08x/0x%08x%08x Role %s", chan, (uint32_t) (fcp->isp_wwnn >> 32), (uint32_t) (fcp->isp_wwnn), (uint32_t) (fcp->isp_wwpn >> 32), (uint32_t) (fcp->isp_wwpn), isp_class3_roles[fcp->role]); } /* * Re-initialize the ISP and complete all orphaned commands * with a 'botched' notice. The reset/init routines should * not disturb an already active list of commands. */ int isp_reinit(ispsoftc_t *isp, int do_load_defaults) { int i, res = 0; if (isp->isp_state == ISP_RUNSTATE) isp_deinit(isp); if (isp->isp_state != ISP_RESETSTATE) isp_reset(isp, do_load_defaults); if (isp->isp_state != ISP_RESETSTATE) { res = EIO; isp_prt(isp, ISP_LOGERR, "%s: cannot reset card", __func__); ISP_DISABLE_INTS(isp); goto cleanup; } isp_init(isp); if (isp->isp_state > ISP_RESETSTATE && isp->isp_state != ISP_RUNSTATE) { res = EIO; isp_prt(isp, ISP_LOGERR, "%s: cannot init card", __func__); ISP_DISABLE_INTS(isp); if (IS_FC(isp)) { /* * If we're in ISP_ROLE_NONE, turn off the lasers. */ if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } } cleanup: isp->isp_nactive = 0; isp_clear_commands(isp); if (IS_FC(isp)) { for (i = 0; i < isp->isp_nchan; i++) isp_clear_portdb(isp, i); } return (res); } /* * NVRAM Routines */ static int isp_read_nvram(ispsoftc_t *isp, int bus) { int i, amt, retval; uint8_t csum, minversion; union { uint8_t _x[ISP2400_NVRAM_SIZE]; uint16_t _s[ISP2400_NVRAM_SIZE>>1]; } _n; #define nvram_data _n._x #define nvram_words _n._s if (IS_24XX(isp)) { return (isp_read_nvram_2400(isp, nvram_data)); } else if (IS_FC(isp)) { amt = ISP2100_NVRAM_SIZE; minversion = 1; } else if (IS_ULTRA2(isp)) { amt = ISP1080_NVRAM_SIZE; minversion = 0; } else { amt = ISP_NVRAM_SIZE; minversion = 2; } for (i = 0; i < amt>>1; i++) { isp_rdnvram_word(isp, i, &nvram_words[i]); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { if (isp->isp_bustype != ISP_BT_SBUS) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header"); isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x", nvram_data[0], nvram_data[1], nvram_data[2]); } retval = -1; goto out; } for (csum = 0, i = 0; i < amt; i++) { csum += nvram_data[i]; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } if (ISP_NVRAM_VERSION(nvram_data) < minversion) { isp_prt(isp, ISP_LOGWARN, "version %d NVRAM not understood", ISP_NVRAM_VERSION(nvram_data)); retval = -1; goto out; } if (IS_ULTRA3(isp)) { isp_parse_nvram_12160(isp, bus, nvram_data); } else if (IS_1080(isp)) { isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_1280(isp) || IS_1240(isp)) { isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_SCSI(isp)) { isp_parse_nvram_1020(isp, nvram_data); } else { isp_parse_nvram_2100(isp, nvram_data); } retval = 0; out: return (retval); #undef nvram_data #undef nvram_words } static int isp_read_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { int retval = 0; uint32_t addr, csum, lwrds, *dptr; if (isp->isp_port) { addr = ISP2400_NVRAM_PORT1_ADDR; } else { 
addr = ISP2400_NVRAM_PORT0_ADDR; } dptr = (uint32_t *) nvram_data; for (lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { isp_rd_2400_nvram(isp, addr++, dptr++); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header (%x %x %x)", nvram_data[0], nvram_data[1], nvram_data[2]); retval = -1; goto out; } dptr = (uint32_t *) nvram_data; for (csum = 0, lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { uint32_t tmp; ISP_IOXGET_32(isp, &dptr[lwrds], tmp); csum += tmp; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } isp_parse_nvram_2400(isp, nvram_data); out: return (retval); } static void isp_rdnvram_word(ispsoftc_t *isp, int wo, uint16_t *rp) { int i, cbits; uint16_t bit, rqst, junk; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); ISP_DELAY(10); if (IS_FC(isp)) { wo &= ((ISP2100_NVRAM_SIZE >> 1) - 1); if (IS_2312(isp) && isp->isp_port) { wo += 128; } rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else if (IS_ULTRA2(isp)) { wo &= ((ISP1080_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else { wo &= ((ISP_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 6) | wo; cbits = 8; } /* * Clock the word select request out... */ for (i = cbits; i >= 0; i--) { if ((rqst >> i) & 1) { bit = BIU_NVRAM_SELECT | BIU_NVRAM_DATAOUT; } else { bit = BIU_NVRAM_SELECT; } ISP_WRITE(isp, BIU_NVRAM, bit); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit | BIU_NVRAM_CLOCK); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } /* * Now read the result back in (bits come back in MSB format). */ *rp = 0; for (i = 0; i < 16; i++) { uint16_t rv; *rp <<= 1; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); ISP_DELAY(10); rv = ISP_READ(isp, BIU_NVRAM); if (rv & BIU_NVRAM_DATAIN) { *rp |= 1; } ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } ISP_WRITE(isp, BIU_NVRAM, 0); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_SWIZZLE_NVRAM_WORD(isp, rp); } static void isp_rd_2400_nvram(ispsoftc_t *isp, uint32_t addr, uint32_t *rp) { int loops = 0; uint32_t base = 0x7ffe0000; uint32_t tmp = 0; if (IS_26XX(isp)) { base = 0x7fe7c000; /* XXX: Observation, may be wrong. 
*/ } else if (IS_25XX(isp)) { base = 0x7ff00000 | 0x48000; } ISP_WRITE(isp, BIU2400_FLASH_ADDR, base | addr); for (loops = 0; loops < 5000; loops++) { ISP_DELAY(10); tmp = ISP_READ(isp, BIU2400_FLASH_ADDR); if ((tmp & (1U << 31)) != 0) { break; } } if (tmp & (1U << 31)) { *rp = ISP_READ(isp, BIU2400_FLASH_DATA); ISP_SWIZZLE_NVRAM_LONG(isp, rp); } else { *rp = 0xffffffff; } } static void isp_parse_nvram_1020(ispsoftc_t *isp, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, 0); int tgt; sdp->isp_fifo_threshold = ISP_NVRAM_FIFO_THRESHOLD(nvram_data) | (ISP_NVRAM_FIFO_THRESHOLD_128(nvram_data) << 2); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP_NVRAM_INITIATOR_ID(nvram_data); sdp->isp_bus_reset_delay = ISP_NVRAM_BUS_RESET_DELAY(nvram_data); sdp->isp_retry_count = ISP_NVRAM_BUS_RETRY_COUNT(nvram_data); sdp->isp_retry_delay = ISP_NVRAM_BUS_RETRY_DELAY(nvram_data); sdp->isp_async_data_setup = ISP_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data); if (isp->isp_type >= ISP_HA_SCSI_1040) { if (sdp->isp_async_data_setup < 9) { sdp->isp_async_data_setup = 9; } } else { if (sdp->isp_async_data_setup != 6) { sdp->isp_async_data_setup = 6; } } sdp->isp_req_ack_active_neg = ISP_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data); sdp->isp_data_line_active_neg = ISP_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data); sdp->isp_data_dma_burst_enabl = ISP_NVRAM_DATA_DMA_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP_NVRAM_CMD_DMA_BURST_ENABLE(nvram_data); sdp->isp_tag_aging = ISP_NVRAM_TAG_AGE_LIMIT(nvram_data); sdp->isp_selection_timeout = ISP_NVRAM_SELECTION_TIMEOUT(nvram_data); sdp->isp_max_queue_depth = ISP_NVRAM_MAX_QUEUE_DEPTH(nvram_data); sdp->isp_fast_mttr = ISP_NVRAM_FAST_MTTR_ENABLE(nvram_data); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt); sdp->isp_devparam[tgt].exc_throttle = ISP_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_offset = ISP_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_period = ISP_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt); /* * We probably shouldn't lie about this, but * it makes it much safer if we limit NVRAM values * to sanity. */ if (isp->isp_type < ISP_HA_SCSI_1040) { /* * If we're not ultra, we can't possibly * be a shorter period than this.
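* (A period factor of 0x19 is 25, i.e. 100ns, i.e. Fast-10.)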
*/ if (sdp->isp_devparam[tgt].nvrm_period < 0x19) { sdp->isp_devparam[tgt].nvrm_period = 0x19; } if (sdp->isp_devparam[tgt].nvrm_offset > 0xc) { sdp->isp_devparam[tgt].nvrm_offset = 0x0c; } } else { if (sdp->isp_devparam[tgt].nvrm_offset > 0x8) { sdp->isp_devparam[tgt].nvrm_offset = 0x8; } } sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP_NVRAM_TGT_RENEG(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP_NVRAM_TGT_TQING(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP_NVRAM_TGT_SYNC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP_NVRAM_TGT_WIDE(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP_NVRAM_TGT_PARITY(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP_NVRAM_TGT_DISC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; /* we don't know */ sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_1080(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, bus); int tgt; sdp->isp_fifo_threshold = ISP1080_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP1080_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP1080_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP1080_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP1080_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP1080_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP1080_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP1080_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP1080_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP1080_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP1080_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP1080_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP1080_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP1080_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP1080_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP1080_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP1080_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = 
sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_12160(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, bus); int tgt; sdp->isp_fifo_threshold = ISP12160_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP12160_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP12160_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP12160_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP12160_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP12160_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP12160_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP12160_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP12160_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP12160_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP12160_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP12160_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP12160_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP12160_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP12160_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP12160_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP12160_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP12160_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP12160_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP12160_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_2100(ispsoftc_t *isp, uint8_t *nvram_data) { fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; /* * There is NVRAM storage for both Port and Node entities- * but the Node entity appears to be unused on all the cards * I can find. However, we should account for this being set * at some point in the future. * * Qlogic WWNs have an NAA of 2, but usually nothing shows up in * bits 48..60. In the case of the 2202, it appears that they do * use bit 48 to distinguish between the two instances on the card. * The 2204, which I've never seen, *probably* extends this method. 
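 *
 * As a worked example of the fixup below: an (illustrative) NVRAM port
 * name of 0x0000e08b00001234 has a zero top nibble, so we OR in
 * (2ULL << 60) and end up with 0x2000e08b00001234, a well-formed
 * NAA-2 WWN.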
*/ wwn = ISP2100_NVRAM_PORT_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Port WWN 0x%08x%08x", (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } fcp->isp_wwpn_nvram = wwn; if (IS_2200(isp) || IS_23XX(isp)) { wwn = ISP2100_NVRAM_NODE_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Node WWN 0x%08x%08x", (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } else { wwn = fcp->isp_wwpn_nvram & ~((uint64_t) 0xfff << 48); } } else { wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; fcp->isp_maxalloc = ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { DEFAULT_FRAMESIZE(isp) = ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data); } fcp->isp_retry_delay = ISP2100_NVRAM_RETRY_DELAY(nvram_data); fcp->isp_retry_count = ISP2100_NVRAM_RETRY_COUNT(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2100_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) == 0) { DEFAULT_EXEC_THROTTLE(isp) = ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2100_NVRAM_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x maxalloc %d maxframelen %d", (uint32_t) (fcp->isp_wwnn_nvram >> 32), (uint32_t) fcp->isp_wwnn_nvram, (uint32_t) (fcp->isp_wwpn_nvram >> 32), (uint32_t) fcp->isp_wwpn_nvram, ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data), ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "execthrottle %d fwoptions 0x%x hardloop %d tov %d", ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2100_NVRAM_OPTIONS(nvram_data), ISP2100_NVRAM_HARDLOOPID(nvram_data), ISP2100_NVRAM_TOV(nvram_data)); fcp->isp_xfwoptions = ISP2100_XFW_OPTIONS(nvram_data); fcp->isp_zfwoptions = ISP2100_ZFW_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "xfwoptions 0x%x zfw options 0x%x", ISP2100_XFW_OPTIONS(nvram_data), ISP2100_ZFW_OPTIONS(nvram_data)); } static void isp_parse_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x exchg_cnt %d maxframelen %d", (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data)), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data)), ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data), ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM execthr %d loopid %d fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2400_NVRAM_HARDLOOPID(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data)); wwn = ISP2400_NVRAM_PORT_NAME(nvram_data); fcp->isp_wwpn_nvram = wwn; wwn = ISP2400_NVRAM_NODE_NAME(nvram_data); if (wwn) { if ((wwn >> 60) != 2 && (wwn >> 60) != 5) { wwn = 0; } } if (wwn == 0 && (fcp->isp_wwpn_nvram >> 60) == 2) { wwn = fcp->isp_wwpn_nvram; wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; if (ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data)) { fcp->isp_maxalloc = ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { DEFAULT_FRAMESIZE(isp) = ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2400_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) 
	    == 0) {
		DEFAULT_EXEC_THROTTLE(isp) =
			ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data);
	}
	fcp->isp_fwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data);
	fcp->isp_xfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data);
	fcp->isp_zfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data);
}
Index: projects/release-pkg/sys/dev/isp/ispvar.h
===================================================================
--- projects/release-pkg/sys/dev/isp/ispvar.h	(revision 297926)
+++ projects/release-pkg/sys/dev/isp/ispvar.h	(revision 297927)
@@ -1,1156 +1,1158 @@
/* $FreeBSD$ */
/*-
 * Copyright (c) 1997-2009 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * Soft Definitions for Qlogic ISP SCSI adapters.
 */
#ifndef	_ISPVAR_H
#define	_ISPVAR_H

#if defined(__NetBSD__) || defined(__OpenBSD__)
#include
#include
#endif
#ifdef	__FreeBSD__
#include
#include
#endif
#ifdef	__linux__
#include "isp_stds.h"
#include "ispmbox.h"
#endif
#ifdef	__svr4__
#include "isp_stds.h"
#include "ispmbox.h"
#endif

#define	ISP_CORE_VERSION_MAJOR	7
#define	ISP_CORE_VERSION_MINOR	0

/*
 * Vector for bus specific code to provide specific services.
 */
typedef struct ispsoftc ispsoftc_t;
struct ispmdvec {
	int		(*dv_rd_isr) (ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
	uint32_t	(*dv_rd_reg) (ispsoftc_t *, int);
	void		(*dv_wr_reg) (ispsoftc_t *, int, uint32_t);
	int		(*dv_mbxdma) (ispsoftc_t *);
	int		(*dv_dmaset) (ispsoftc_t *, XS_T *, void *);
	void		(*dv_dmaclr) (ispsoftc_t *, XS_T *, uint32_t);
	void		(*dv_reset0) (ispsoftc_t *);
	void		(*dv_reset1) (ispsoftc_t *);
	void		(*dv_dregs) (ispsoftc_t *, const char *);
	const void *	dv_ispfw;	/* ptr to f/w */
	uint16_t	dv_conf1;
	uint16_t	dv_clock;	/* clock frequency */
};

/*
 * Overall parameters
 */
#define	MAX_TARGETS	16
#ifndef	MAX_FC_TARG
#define	MAX_FC_TARG	1024
#endif
#define	ISP_MAX_TARGETS(isp)	(IS_FC(isp)? MAX_FC_TARG : MAX_TARGETS)
#define	ISP_MAX_LUNS(isp)	(isp)->isp_maxluns

/*
 * Macros to access ISP registers through bus specific layers-
 * mostly wrappers to vector through the mdvec structure.
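 *
 * For instance, a hypothetical PCI attachment that sets
 *
 *	isp->isp_mdvec->dv_rd_reg = isp_pci_rd_reg;
 *
 * would have ISP_READ(isp, reg) below resolve to an indirect call
 * through isp_pci_rd_reg. (isp_pci_rd_reg is only an illustrative
 * name here.)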
 */
#define	ISP_READ_ISR(isp, isrp, semap, info)	\
	(*(isp)->isp_mdvec->dv_rd_isr)(isp, isrp, semap, info)

#define	ISP_READ(isp, reg)	\
	(*(isp)->isp_mdvec->dv_rd_reg)((isp), (reg))

#define	ISP_WRITE(isp, reg, val)	\
	(*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), (val))

#define	ISP_MBOXDMASETUP(isp)	\
	(*(isp)->isp_mdvec->dv_mbxdma)((isp))

#define	ISP_DMASETUP(isp, xs, req)	\
	(*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req))

#define	ISP_DMAFREE(isp, xs, hndl)	\
	if ((isp)->isp_mdvec->dv_dmaclr)	\
		(*(isp)->isp_mdvec->dv_dmaclr)((isp), (xs), (hndl))

#define	ISP_RESET0(isp)	\
	if ((isp)->isp_mdvec->dv_reset0) (*(isp)->isp_mdvec->dv_reset0)((isp))
#define	ISP_RESET1(isp)	\
	if ((isp)->isp_mdvec->dv_reset1) (*(isp)->isp_mdvec->dv_reset1)((isp))
#define	ISP_DUMPREGS(isp, m)	\
	if ((isp)->isp_mdvec->dv_dregs) (*(isp)->isp_mdvec->dv_dregs)((isp),(m))

#define	ISP_SETBITS(isp, reg, val)	\
	(*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) | (val))

#define	ISP_CLRBITS(isp, reg, val)	\
	(*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) & ~(val))

/*
 * The MEMORYBARRIER macro is defined per platform (to provide synchronization
 * on Request and Response Queues, Scratch DMA areas, and Registers)
 *
 * Defined Memory Barrier Synchronization Types
 */
#define	SYNC_REQUEST	0	/* request queue synchronization */
#define	SYNC_RESULT	1	/* result queue synchronization */
#define	SYNC_SFORDEV	2	/* scratch, sync for ISP */
#define	SYNC_SFORCPU	3	/* scratch, sync for CPU */
#define	SYNC_REG	4	/* for registers */
#define	SYNC_ATIOQ	5	/* atio result queue (24xx) */
#define	SYNC_IFORDEV	6	/* synchronous IOCB, sync for ISP */
#define	SYNC_IFORCPU	7	/* synchronous IOCB, sync for CPU */

/*
 * Request/Response Queue defines and macros.
 * The maximum is defined per platform (and can be based on board type).
 */
/* This is the size of a queue entry (request and response) */
#define	QENTRY_LEN	64
/* Both request and result queue length must be a power of two */
#define	RQUEST_QUEUE_LEN(x)	MAXISPREQUEST(x)
#ifdef	ISP_TARGET_MODE
#define	RESULT_QUEUE_LEN(x)	MAXISPREQUEST(x)
#else
#define	RESULT_QUEUE_LEN(x)	\
	(((MAXISPREQUEST(x) >> 2) < 64)? 64 : MAXISPREQUEST(x) >> 2)
#endif
#define	ISP_QUEUE_ENTRY(q, idx)	(((uint8_t *)q) + ((idx) * QENTRY_LEN))
#define	ISP_QUEUE_SIZE(n)	((n) * QENTRY_LEN)
#define	ISP_NXT_QENTRY(idx, qlen)	(((idx) + 1) & ((qlen)-1))
#define	ISP_QFREE(in, out, qlen)	\
	((in == out)? (qlen - 1) : ((in > out)?
\ ((qlen - 1) - (in - out)) : (out - in - 1))) #define ISP_QAVAIL(isp) \ ISP_QFREE(isp->isp_reqidx, isp->isp_reqodx, RQUEST_QUEUE_LEN(isp)) #define ISP_ADD_REQUEST(isp, nxti) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN, -1); \ ISP_WRITE(isp, isp->isp_rqstinrp, nxti); \ isp->isp_reqidx = nxti #define ISP_SYNC_REQUEST(isp) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN, -1); \ isp->isp_reqidx = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); \ ISP_WRITE(isp, isp->isp_rqstinrp, isp->isp_reqidx) /* * SCSI Specific Host Adapter Parameters- per bus, per target */ typedef struct { uint32_t : 8, update : 1, sendmarker : 1, isp_req_ack_active_neg : 1, isp_data_line_active_neg: 1, isp_cmd_dma_burst_enable: 1, isp_data_dma_burst_enabl: 1, isp_fifo_threshold : 3, isp_ptisp : 1, isp_ultramode : 1, isp_diffmode : 1, isp_lvdmode : 1, isp_fast_mttr : 1, /* fast sram */ isp_initiator_id : 4, isp_async_data_setup : 4; uint16_t isp_selection_timeout; uint16_t isp_max_queue_depth; uint8_t isp_tag_aging; uint8_t isp_bus_reset_delay; uint8_t isp_retry_count; uint8_t isp_retry_delay; struct { uint32_t exc_throttle : 8, : 1, dev_enable : 1, /* ignored */ dev_update : 1, dev_refresh : 1, actv_offset : 4, goal_offset : 4, nvrm_offset : 4; uint8_t actv_period; /* current sync period */ uint8_t goal_period; /* goal sync period */ uint8_t nvrm_period; /* nvram sync period */ uint16_t actv_flags; /* current device flags */ uint16_t goal_flags; /* goal device flags */ uint16_t nvrm_flags; /* nvram device flags */ } isp_devparam[MAX_TARGETS]; } sdparam; /* * Device Flags */ #define DPARM_DISC 0x8000 #define DPARM_PARITY 0x4000 #define DPARM_WIDE 0x2000 #define DPARM_SYNC 0x1000 #define DPARM_TQING 0x0800 #define DPARM_ARQ 0x0400 #define DPARM_QFRZ 0x0200 #define DPARM_RENEG 0x0100 #define DPARM_NARROW 0x0080 #define DPARM_ASYNC 0x0040 #define DPARM_PPR 0x0020 #define DPARM_DEFAULT (0xFF00 & ~DPARM_QFRZ) #define DPARM_SAFE_DFLT (DPARM_DEFAULT & ~(DPARM_WIDE|DPARM_SYNC|DPARM_TQING)) /* technically, not really correct, as they need to be rated based upon clock */ #define ISP_80M_SYNCPARMS 0x0c09 #define ISP_40M_SYNCPARMS 0x0c0a #define ISP_20M_SYNCPARMS 0x0c0c #define ISP_20M_SYNCPARMS_1040 0x080c #define ISP_10M_SYNCPARMS 0x0c19 #define ISP_08M_SYNCPARMS 0x0c25 #define ISP_05M_SYNCPARMS 0x0c32 #define ISP_04M_SYNCPARMS 0x0c41 /* * Fibre Channel Specifics */ /* These are for non-2K Login Firmware cards */ #define FL_ID 0x7e /* FL_Port Special ID */ #define SNS_ID 0x80 /* SNS Server Special ID */ #define NPH_MAX 0xfe /* These are for 2K Login Firmware cards */ #define NPH_RESERVED 0x7F0 /* begin of reserved N-port handles */ #define NPH_MGT_ID 0x7FA /* Management Server Special ID */ #define NPH_SNS_ID 0x7FC /* SNS Server Special ID */ #define NPH_FABRIC_CTLR 0x7FD /* Fabric Controller (0xFFFFFD) */ #define NPH_FL_ID 0x7FE /* F Port Special ID (0xFFFFFE) */ #define NPH_IP_BCST 0x7FF /* IP Broadcast Special ID (0xFFFFFF) */ #define NPH_MAX_2K 0x800 /* * "Unassigned" handle to be used internally */ #define NIL_HANDLE 0xffff /* * Limit for devices on an arbitrated loop. 
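 * (FC-AL provides 127 valid AL_PAs; one is taken by the FL_Port,
 * which leaves at most 126 addressable NL_Ports.)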
 */
#define	LOCAL_LOOP_LIM		126

/*
 * Limit for (2K login) N-port handle amounts
 */
#define	MAX_NPORT_HANDLE	2048

/*
 * Special Constants
 */
#define	INI_NONE	((uint64_t) 0)
#define	ISP_NOCHAN	0xff

/*
 * Special Port IDs
 */
#define	MANAGEMENT_PORT_ID	0xFFFFFA
#define	SNS_PORT_ID		0xFFFFFC
#define	FABRIC_PORT_ID		0xFFFFFE
#define	PORT_ANY		0xFFFFFF
#define	PORT_NONE		0
#define	VALID_PORT(port)	(port != PORT_NONE && port != PORT_ANY)
#define	DOMAIN_CONTROLLER_BASE	0xFFFC00
#define	DOMAIN_CONTROLLER_END	0xFFFCFF

/*
 * Command Handles
 *
 * Most QLogic initiator or target commands have 32 bit handles associated
 * with them. We want to have a quick way to index back and forth between a
 * local SCSI command context and what the firmware is passing back to us.
 * We also want to avoid working on stale information. This structure handles
 * both at the expense of some local memory.
 *
 * The handle is architected thusly:
 *
 *	0 means "free handle"
 *	bits 0..12 index commands
 *	bits 13..15 index usage
 *	bits 16..31 contain a rolling sequence
 *
 *
 */
typedef struct {
	void *		cmd;	/* associated command context */
	uint32_t	handle;	/* handle associated with this command */
} isp_hdl_t;
#define	ISP_HANDLE_FREE		0x00000000
#define	ISP_HANDLE_CMD_MASK	0x00001fff
#define	ISP_HANDLE_USAGE_MASK	0x0000e000
#define	ISP_HANDLE_USAGE_SHIFT	13
#define	ISP_H2HT(hdl)	((hdl & ISP_HANDLE_USAGE_MASK) >> ISP_HANDLE_USAGE_SHIFT)
#	define	ISP_HANDLE_NONE		0
#	define	ISP_HANDLE_INITIATOR	1
#	define	ISP_HANDLE_TARGET	2
#	define	ISP_HANDLE_CTRL		3
#define	ISP_HANDLE_SEQ_MASK	0xffff0000
#define	ISP_HANDLE_SEQ_SHIFT	16
#define	ISP_H2SEQ(hdl)	((hdl & ISP_HANDLE_SEQ_MASK) >> ISP_HANDLE_SEQ_SHIFT)
#define	ISP_VALID_HANDLE(c, hdl)	\
	((ISP_H2HT(hdl) == ISP_HANDLE_INITIATOR || \
	  ISP_H2HT(hdl) == ISP_HANDLE_TARGET || \
	  ISP_H2HT(hdl) == ISP_HANDLE_CTRL) && \
	 ((hdl) & ISP_HANDLE_CMD_MASK) < (c)->isp_maxcmds && \
	 (hdl) == ((c)->isp_xflist[(hdl) & ISP_HANDLE_CMD_MASK].handle))
#define	ISP_BAD_HANDLE_INDEX	0xffffffff


/*
 * FC Port Database entry.
 *
 * It has a handle that the f/w uses to address commands to a device.
 * This handle's value may be assigned by the firmware (e.g., for local loop
 * devices) or by the driver (e.g., for fabric devices).
 *
 * It has a state. If the state is VALID, that means that we've logged into
 * the device.
 *
 * Local loop devices the firmware automatically performs PLOGI on for us
 * (which is why that handle is imposed upon us). Fabric devices we assign
 * a handle to and perform the PLOGI on.
 *
 * When a PORT DATABASE CHANGED asynchronous event occurs, we mark all VALID
 * entries as PROBATIONAL. This allows us, if policy says to, just keep track
 * of devices whose handles change but are otherwise the same device (and
 * thus keep 'target' constant).
 *
 * In any case, we search all possible local loop handles. For each one that
 * has a port database entity returned, we search for any PROBATIONAL entry
 * that matches it and update as appropriate. Otherwise, as a new entry, we
 * find room for it in the Port Database. We *try* and use the handle as the
 * index to put it into the Database, but that's just an optimization. We mark
 * the entry VALID and make sure that the target index is updated and correct.
 *
 * When we get done searching the local loop, we then search similarly for
 * a list of devices we've gotten from the fabric name controller (if we're
 * on a fabric). VALID marking is also done similarly.
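 * (For example: a device whose loop handle changes across a LIP still
 * matches its old, now PROBATIONAL, entry by WWNs; we just update the
 * handle in place and the 'target' index stays constant.)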
 *
 * When all of this is done, we can march through the database and clean up
 * any entry that is still PROBATIONAL (these represent devices which have
 * departed). Then we're done and can resume normal operations.
 *
 * Negative invariants that we try and test for are:
 *
 *  + There can never be two non-NIL entries with the same { Port, Node } WWN
 *    duples.
 *
 *  + There can never be two non-NIL entries with the same handle.
 */
typedef struct {
	/*
	 * This is the handle that the firmware needs in order for us to
	 * send commands to the device. For pre-24XX cards, this would be
	 * the 'loopid'.
	 */
	uint16_t	handle;

	/*
	 * A device is 'autologin' if the firmware automatically logs into
	 * it (re-logins as needed). Basically, local private loop devices.
	 *
	 * PRLI word 3 parameters contains role as well as other things.
	 *
	 * The state is the current state of this entry.
	 *
	 * The is_target is the current state of target on this port.
	 *
	 * The is_initiator is the current state of initiator on this port.
	 *
	 * Portid is obvious, as are node && port WWNs. The new_role and
	 * new_portid is for when we are pending a change.
	 */
	uint16_t	prli_word3;	/* PRLI parameters */
	uint16_t	new_prli_word3;	/* Incoming new PRLI parameters */
	uint16_t		: 11,
		autologin	: 1,	/* F/W does PLOGI/PLOGO */
		probational	: 1,
		state		: 3;
	uint32_t		: 6,
		is_target	: 1,
		is_initiator	: 1,
		portid		: 24;
	uint32_t		: 8,
		new_portid	: 24;
	uint64_t	node_wwn;
	uint64_t	port_wwn;
	uint32_t	gone_timer;
} fcportdb_t;

#define	FC_PORTDB_STATE_NIL		0	/* Empty DB slot */
#define	FC_PORTDB_STATE_DEAD		1	/* Was valid, but no more. */
#define	FC_PORTDB_STATE_CHANGED		2	/* Was valid, but changed. */
#define	FC_PORTDB_STATE_NEW		3	/* Logged in, not announced. */
#define	FC_PORTDB_STATE_ZOMBIE		4	/* Invalid, but announced. */
#define	FC_PORTDB_STATE_VALID		5	/* Valid */

#define	FC_PORTDB_TGT(isp, bus, lp)	(int)(lp - FCPARAM(isp, bus)->portdb)

/*
 * FC card specific information
 *
 * This structure is replicated across multiple channels for multi-id
 * capable chipsets, with some entities different on a per-channel basis.
 */
typedef struct {
	int		isp_gbspeed;	/* Connection speed */
	int		isp_linkstate;	/* Link state */
	int		isp_fwstate;	/* ISP F/W state */
	int		isp_loopstate;	/* Loop State */
	int		isp_topo;	/* Connection Type */

	uint32_t			: 4,
		fctape_enabled		: 1,
		sendmarker		: 1,
		role			: 2,
		isp_portid		: 24;	/* S_ID */

	uint16_t	isp_fwoptions;
	uint16_t	isp_xfwoptions;
	uint16_t	isp_zfwoptions;
	uint16_t	isp_loopid;	/* hard loop id */
	uint16_t	isp_sns_hdl;	/* N-port handle for SNS */
	uint16_t	isp_lasthdl;	/* only valid for channel 0 */
	uint16_t	isp_maxalloc;
	uint16_t	isp_fabric_params;
+	uint16_t	isp_login_hdl;	/* Logging in handle */
	uint8_t		isp_retry_delay;
	uint8_t		isp_retry_count;

	/*
	 * Current active WWNN/WWPN
	 */
	uint64_t	isp_wwnn;
	uint64_t	isp_wwpn;

	/*
	 * NVRAM WWNN/WWPN
	 */
	uint64_t	isp_wwnn_nvram;
	uint64_t	isp_wwpn_nvram;

	/*
	 * Our Port Data Base
	 */
	fcportdb_t	portdb[MAX_FC_TARG];

	/*
	 * Scratch DMA mapped in area to fetch Port Database stuff, etc.
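	 * (ISP_FC_SCRLEN bytes; access is expected to be serialized with
	 * the platform's FC_SCRATCH_ACQUIRE/FC_SCRATCH_RELEASE macros,
	 * described further below.)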
	 */
	void *		isp_scratch;
	XS_DMA_ADDR_T	isp_scdma;

	uint8_t		isp_scanscratch[ISP_FC_SCRLEN];
} fcparam;

#define	FW_CONFIG_WAIT		0
#define	FW_WAIT_LINK		1
#define	FW_WAIT_LOGIN		2
#define	FW_READY		3
#define	FW_LOSS_OF_SYNC		4
#define	FW_ERROR		5
#define	FW_REINIT		6
#define	FW_NON_PART		7

#define	LOOP_NIL		0
#define	LOOP_HAVE_LINK		1
#define	LOOP_TESTING_LINK	2
#define	LOOP_LTEST_DONE		3
#define	LOOP_SCANNING_LOOP	4
#define	LOOP_LSCAN_DONE		5
#define	LOOP_SCANNING_FABRIC	6
#define	LOOP_FSCAN_DONE		7
#define	LOOP_SYNCING_PDB	8
#define	LOOP_READY		9

#define	TOPO_NL_PORT	0
#define	TOPO_FL_PORT	1
#define	TOPO_N_PORT	2
#define	TOPO_F_PORT	3
#define	TOPO_PTP_STUB	4

#define	TOPO_IS_FABRIC(x)	((x) == TOPO_FL_PORT || (x) == TOPO_F_PORT)

/*
 * Soft Structure per host adapter
 */
struct ispsoftc {
	/*
	 * Platform (OS) specific data
	 */
	struct isposinfo	isp_osinfo;

	/*
	 * Pointer to bus specific functions and data
	 */
	struct ispmdvec *	isp_mdvec;

	/*
	 * (Mostly) nonvolatile state. Board specific parameters
	 * may contain some volatile state (e.g., current loop state).
	 */

	void * 			isp_param;	/* type specific */
	uint64_t		isp_fwattr;	/* firmware attributes */
	uint16_t		isp_fwrev[3];	/* Loaded F/W revision */
	uint16_t		isp_maxcmds;	/* max possible I/O cmds */
	uint8_t			isp_type;	/* HBA Chip Type */
	uint8_t			isp_revision;	/* HBA Chip H/W Revision */
	uint16_t		isp_nchan;	/* number of channels */
	uint32_t		isp_maxluns;	/* maximum luns supported */

	uint32_t		isp_clock	: 8,	/* input clock */
						: 4,
				isp_port	: 1,	/* 23XX/24XX only */
				isp_open	: 1,	/* opened (ioctl) */
				isp_bustype	: 1,	/* SBus or PCI */
				isp_loaded_fw	: 1,	/* loaded firmware */
				isp_dblev	: 16;	/* debug log mask */


	uint32_t		isp_confopts;	/* config options */

	uint32_t		isp_rqstinrp;	/* register for REQINP */
	uint32_t		isp_rqstoutrp;	/* register for REQOUTP */
	uint32_t		isp_respinrp;	/* register for RESINP */
	uint32_t		isp_respoutrp;	/* register for RESOUTP */

	/*
	 * Instrumentation
	 */
	uint64_t		isp_intcnt;	/* total int count */
	uint64_t		isp_intbogus;	/* spurious int count */
	uint64_t		isp_intmboxc;	/* mbox completions */
	uint64_t		isp_intoasync;	/* other async */
	uint64_t		isp_rsltccmplt;	/* CMDs on result q */
	uint64_t		isp_fphccmplt;	/* CMDs via fastpost */
	uint16_t		isp_rscchiwater;
	uint16_t		isp_fpcchiwater;
	NANOTIME_T		isp_init_time;	/* time we were last initialized */

	/*
	 * Volatile state
	 */
	volatile uint32_t	:	8,
		isp_dead	:	1,
			:	2,
		isp_mboxbsy	:	1,	/* mailbox command active */
		isp_state	:	3,
		isp_nactive	:	16;	/* how many commands active */
	volatile mbreg_t	isp_curmbx;	/* currently active mailbox command */
	volatile uint32_t	isp_reqodx;	/* index of last ISP pickup */
	volatile uint32_t	isp_reqidx;	/* index of next request */
	volatile uint32_t	isp_residx;	/* index of last ISP write */
	volatile uint32_t	isp_resodx;	/* index of next result */
	volatile uint32_t	isp_atioodx;	/* index of next ATIO */
	volatile uint32_t	isp_obits;	/* mailbox command output */
	volatile uint32_t	isp_serno;	/* rolling serial number */
	volatile uint16_t	isp_mboxtmp[MAX_MAILBOX];
	volatile uint16_t	isp_lastmbxcmd;	/* last mbox command sent */
	volatile uint16_t	isp_mbxwrk0;
	volatile uint16_t	isp_mbxwrk1;
	volatile uint16_t	isp_mbxwrk2;
	volatile uint16_t	isp_mbxwrk8;
	volatile uint16_t	isp_seqno;	/* running sequence number */
	void *			isp_mbxworkp;

	/*
	 * Active commands are stored here, indexed by handle functions.
	 */
	isp_hdl_t		*isp_xflist;
	isp_hdl_t		*isp_xffree;

	/*
	 * DMA mapped in area for synchronous IOCB requests.
	 */
	void *			isp_iocb;
	XS_DMA_ADDR_T		isp_iocb_dma;

	/*
	 * request/result queue pointers and DMA handles for them.
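	 * (Each holds RQUEST_QUEUE_LEN()/RESULT_QUEUE_LEN() slots of
	 * QENTRY_LEN (64) bytes apiece; see the queue macros above.)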
*/ void * isp_rquest; void * isp_result; XS_DMA_ADDR_T isp_rquest_dma; XS_DMA_ADDR_T isp_result_dma; #ifdef ISP_TARGET_MODE /* for 24XX only */ void * isp_atioq; XS_DMA_ADDR_T isp_atioq_dma; #endif }; #define SDPARAM(isp, chan) (&((sdparam *)(isp)->isp_param)[(chan)]) #define FCPARAM(isp, chan) (&((fcparam *)(isp)->isp_param)[(chan)]) #define ISP_SET_SENDMARKER(isp, chan, val) \ if (IS_FC(isp)) { \ FCPARAM(isp, chan)->sendmarker = val; \ } else { \ SDPARAM(isp, chan)->sendmarker = val; \ } #define ISP_TST_SENDMARKER(isp, chan) \ (IS_FC(isp)? \ FCPARAM(isp, chan)->sendmarker != 0 : \ SDPARAM(isp, chan)->sendmarker != 0) /* * ISP Driver Run States */ #define ISP_NILSTATE 0 #define ISP_CRASHED 1 #define ISP_RESETSTATE 2 #define ISP_INITSTATE 3 #define ISP_RUNSTATE 4 /* * ISP Runtime Configuration Options */ #define ISP_CFG_FULL_DUPLEX 0x01 /* Full Duplex (Fibre Channel only) */ -#define ISP_CFG_PORT_PREF 0x0c /* Mask for Port Prefs (all FC except 2100) */ -#define ISP_CFG_LPORT 0x00 /* prefer {N/F}L-Port connection */ -#define ISP_CFG_NPORT 0x04 /* prefer {N/F}-Port connection */ -#define ISP_CFG_NPORT_ONLY 0x08 /* insist on {N/F}-Port connection */ -#define ISP_CFG_LPORT_ONLY 0x0c /* insist on {N/F}L-Port connection */ +#define ISP_CFG_PORT_PREF 0x0e /* Mask for Port Prefs (all FC except 2100) */ +#define ISP_CFG_PORT_DEF 0x00 /* prefer connection type from NVRAM */ +#define ISP_CFG_LPORT_ONLY 0x02 /* insist on {N/F}L-Port connection */ +#define ISP_CFG_NPORT_ONLY 0x04 /* insist on {N/F}-Port connection */ +#define ISP_CFG_LPORT 0x06 /* prefer {N/F}L-Port connection */ +#define ISP_CFG_NPORT 0x08 /* prefer {N/F}-Port connection */ #define ISP_CFG_1GB 0x10 /* force 1GB connection (23XX only) */ #define ISP_CFG_2GB 0x20 /* force 2GB connection (23XX only) */ #define ISP_CFG_NORELOAD 0x80 /* don't download f/w */ #define ISP_CFG_NONVRAM 0x40 /* ignore NVRAM */ #define ISP_CFG_NOFCTAPE 0x100 /* disable FC-Tape */ #define ISP_CFG_FCTAPE 0x200 /* enable FC-Tape */ #define ISP_CFG_OWNFSZ 0x400 /* override NVRAM frame size */ #define ISP_CFG_OWNLOOPID 0x800 /* override NVRAM loopid */ #define ISP_CFG_OWNEXCTHROTTLE 0x1000 /* override NVRAM execution throttle */ #define ISP_CFG_4GB 0x2000 /* force 4GB connection (24XX only) */ #define ISP_CFG_8GB 0x4000 /* force 8GB connection (25XX only) */ #define ISP_CFG_16GB 0x8000 /* force 16GB connection (82XX only) */ /* * For each channel, the outer layers should know what role that channel * will take: ISP_ROLE_NONE, ISP_ROLE_INITIATOR, ISP_ROLE_TARGET, * ISP_ROLE_BOTH. * * If you set ISP_ROLE_NONE, the cards will be reset, new firmware loaded, * NVRAM read, and defaults set, but any further initialization (e.g. * INITIALIZE CONTROL BLOCK commands for 2X00 cards) won't be done. * * If INITIATOR MODE isn't set, attempts to run commands will be stopped * at isp_start and completed with the equivalent of SELECTION TIMEOUT. * * If TARGET MODE is set, it doesn't mean that the rest of target mode support * needs to be enabled, or will even work. What happens with the 2X00 cards * here is that if you have enabled it with TARGET MODE as part of the ICB * options, but you haven't given the f/w any ram resources for ATIOs or * Immediate Notifies, the f/w just handles what it can and you never see * anything. Basically, it sends a single byte of data (the first byte, * which you can set as part of the INITIALIZE CONTROL BLOCK command) for * INQUIRY, and sends back QUEUE FULL status for any other command. 
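 * (So a channel left at ISP_ROLE_NONE comes up fully reset and
 * parameterized, but won't initiate commands or surface target mode
 * traffic until its role is changed, e.g. via ISPCTL_CHANGE_ROLE.)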
 *
 */
#define	ISP_ROLE_NONE		0x0
#define	ISP_ROLE_TARGET		0x1
#define	ISP_ROLE_INITIATOR	0x2
#define	ISP_ROLE_BOTH		(ISP_ROLE_TARGET|ISP_ROLE_INITIATOR)
#define	ISP_ROLE_EITHER		ISP_ROLE_BOTH
#ifndef	ISP_DEFAULT_ROLES
/*
 * Counterintuitively, we prefer to default to role 'none'
 * if we have enabled target mode support. This gives us the
 * maximum flexibility as to which port will do what.
 */
#ifdef	ISP_TARGET_MODE
#define	ISP_DEFAULT_ROLES	ISP_ROLE_NONE
#else
#define	ISP_DEFAULT_ROLES	ISP_ROLE_INITIATOR
#endif
#endif


/*
 * Firmware related defines
 */
#define	ISP_CODE_ORG		0x1000	/* default f/w code start */
#define	ISP_CODE_ORG_2300	0x0800	/* ..except for 2300s */
#define	ISP_CODE_ORG_2400	0x100000 /* ..and 2400s */
#define	ISP_FW_REV(maj, min, mic)	((maj << 24) | (min << 16) | mic)
#define	ISP_FW_MAJOR(code)	((code >> 24) & 0xff)
#define	ISP_FW_MINOR(code)	((code >> 16) & 0xff)
#define	ISP_FW_MICRO(code)	((code >> 8) & 0xff)
#define	ISP_FW_REVX(xp)		((xp[0]<<24) | (xp[1] << 16) | xp[2])
#define	ISP_FW_MAJORX(xp)	(xp[0])
#define	ISP_FW_MINORX(xp)	(xp[1])
#define	ISP_FW_MICROX(xp)	(xp[2])
#define	ISP_FW_NEWER_THAN(i, major, minor, micro)	\
	(ISP_FW_REVX((i)->isp_fwrev) > ISP_FW_REV(major, minor, micro))
#define	ISP_FW_OLDER_THAN(i, major, minor, micro)	\
	(ISP_FW_REVX((i)->isp_fwrev) < ISP_FW_REV(major, minor, micro))

/*
 * Bus (implementation) types
 */
#define	ISP_BT_PCI	0	/* PCI Implementations */
#define	ISP_BT_SBUS	1	/* SBus Implementations */

/*
 * If we have not otherwise defined SBus support away, make sure
 * it is defined here so that the code is included by default.
 */
#ifndef	ISP_SBUS_SUPPORTED
#define	ISP_SBUS_SUPPORTED	1
#endif

/*
 * Chip Types
 */
#define	ISP_HA_SCSI		0xf
#define	ISP_HA_SCSI_UNKNOWN	0x1
#define	ISP_HA_SCSI_1020	0x2
#define	ISP_HA_SCSI_1020A	0x3
#define	ISP_HA_SCSI_1040	0x4
#define	ISP_HA_SCSI_1040A	0x5
#define	ISP_HA_SCSI_1040B	0x6
#define	ISP_HA_SCSI_1040C	0x7
#define	ISP_HA_SCSI_1240	0x8
#define	ISP_HA_SCSI_1080	0x9
#define	ISP_HA_SCSI_1280	0xa
#define	ISP_HA_SCSI_10160	0xb
#define	ISP_HA_SCSI_12160	0xc

#define	ISP_HA_FC		0xf0
#define	ISP_HA_FC_2100		0x10
#define	ISP_HA_FC_2200		0x20
#define	ISP_HA_FC_2300		0x30
#define	ISP_HA_FC_2312		0x40
#define	ISP_HA_FC_2322		0x50
#define	ISP_HA_FC_2400		0x60
#define	ISP_HA_FC_2500		0x70
#define	ISP_HA_FC_2600		0x80

#define	IS_SCSI(isp)	(isp->isp_type & ISP_HA_SCSI)
#define	IS_1020(isp)	(isp->isp_type < ISP_HA_SCSI_1240)
#define	IS_1240(isp)	(isp->isp_type == ISP_HA_SCSI_1240)
#define	IS_1080(isp)	(isp->isp_type == ISP_HA_SCSI_1080)
#define	IS_1280(isp)	(isp->isp_type == ISP_HA_SCSI_1280)
#define	IS_10160(isp)	(isp->isp_type == ISP_HA_SCSI_10160)
#define	IS_12160(isp)	(isp->isp_type == ISP_HA_SCSI_12160)

#define	IS_12X0(isp)	(IS_1240(isp) || IS_1280(isp))
#define	IS_1X160(isp)	(IS_10160(isp) || IS_12160(isp))
#define	IS_DUALBUS(isp)	(IS_12X0(isp) || IS_12160(isp))
#define	IS_ULTRA2(isp)	(IS_1080(isp) || IS_1280(isp) || IS_1X160(isp))
#define	IS_ULTRA3(isp)	(IS_1X160(isp))

#define	IS_FC(isp)	((isp)->isp_type & ISP_HA_FC)
#define	IS_2100(isp)	((isp)->isp_type == ISP_HA_FC_2100)
#define	IS_2200(isp)	((isp)->isp_type == ISP_HA_FC_2200)
#define	IS_23XX(isp)	((isp)->isp_type >= ISP_HA_FC_2300 && \
			 (isp)->isp_type < ISP_HA_FC_2400)
#define	IS_2300(isp)	((isp)->isp_type == ISP_HA_FC_2300)
#define	IS_2312(isp)	((isp)->isp_type == ISP_HA_FC_2312)
#define	IS_2322(isp)	((isp)->isp_type == ISP_HA_FC_2322)
#define	IS_24XX(isp)	((isp)->isp_type >= ISP_HA_FC_2400)
#define	IS_25XX(isp)	((isp)->isp_type >= ISP_HA_FC_2500)
#define	IS_26XX(isp)	((isp)->isp_type >=
ISP_HA_FC_2600)

/*
 * DMA related macros
 */
#define	DMA_WD3(x)	(((uint16_t)(((uint64_t)x) >> 48)) & 0xffff)
#define	DMA_WD2(x)	(((uint16_t)(((uint64_t)x) >> 32)) & 0xffff)
#define	DMA_WD1(x)	((uint16_t)((x) >> 16) & 0xffff)
#define	DMA_WD0(x)	((uint16_t)((x) & 0xffff))

#define	DMA_LO32(x)	((uint32_t) (x))
#define	DMA_HI32(x)	((uint32_t)(((uint64_t)x) >> 32))

/*
 * Core System Function Prototypes
 */

/*
 * Reset Hardware. Totally. Assumes that you'll follow this with a call to isp_init.
 */
void isp_reset(ispsoftc_t *, int);

/*
 * Initialize Hardware to known state
 */
void isp_init(ispsoftc_t *);

/*
 * Reset the ISP and call completion for any orphaned commands.
 */
int isp_reinit(ispsoftc_t *, int);

/*
 * Internal Interrupt Service Routine
 *
 * The outer layers do the spade work to get the appropriate status register,
 * semaphore register and first mailbox register (if appropriate). This also
 * means that most spurious/bogus interrupts not for us can be filtered first.
 */
void isp_intr(ispsoftc_t *, uint16_t, uint16_t, uint16_t);


/*
 * Command Entry Point- Platform Dependent layers call into this
 */
int isp_start(XS_T *);

/* these values are what isp_start returns */
#define	CMD_COMPLETE	101	/* command completed */
#define	CMD_EAGAIN	102	/* busy- maybe retry later */
#define	CMD_QUEUED	103	/* command has been queued for execution */
#define	CMD_RQLATER	104	/* requeue this command later */

/*
 * Command Completion Point- Core layers call out from this with completed cmds
 */
void isp_done(XS_T *);

/*
 * Platform Dependent to External to Internal Control Function
 *
 * Assumes locks are held on entry. You should note that with many of
 * these commands locks may be released while this function is called.
 *
 * ... ISPCTL_RESET_BUS, int channel);
 *        Reset BUS on this channel
 * ... ISPCTL_RESET_DEV, int channel, int target);
 *        Reset Device on this channel at this target.
 * ... ISPCTL_ABORT_CMD, XS_T *xs);
 *        Abort active transaction described by xs.
 * ... ISPCTL_UPDATE_PARAMS);
 *        Update any operating parameters (speed, etc.)
 * ... ISPCTL_FCLINK_TEST, int channel);
 *        Test FC link status on this channel
 * ... ISPCTL_SCAN_LOOP, int channel);
 *        Scan local loop on this channel
 * ... ISPCTL_SCAN_FABRIC, int channel);
 *        Scan fabric on this channel
 * ... ISPCTL_PDB_SYNC, int channel);
 *        Synchronize port database on this channel
 * ... ISPCTL_SEND_LIP, int channel);
 *        Send a LIP on this channel
 * ... ISPCTL_GET_NAMES, int channel, int np, uint64_t *wwnn, uint64_t *wwpn)
 *        Get a WWNN/WWPN for this N-port handle on this channel
 * ... ISPCTL_RUN_MBOXCMD, mbreg_t *mbp)
 *        Run this mailbox command
 * ... ISPCTL_GET_PDB, int channel, int nphandle, isp_pdb_t *pdb)
 *        Get PDB on this channel for this N-port handle
 * ... ISPCTL_PLOGX, isp_plcmd_t *)
 *        Perform a port login/logout
 * ... ISPCTL_CHANGE_ROLE, int channel, int role);
 *        Change role of specified channel
 *
 * ISPCTL_PDB_SYNC is somewhat misnamed. It actually is the final step, in
 * order, of ISPCTL_FCLINK_TEST, ISPCTL_SCAN_LOOP, and ISPCTL_SCAN_FABRIC.
 * The main purpose of ISPCTL_PDB_SYNC is to complete management of logging
 * and logging out of fabric devices (if one is on a fabric) and then marking
 * the 'loop state' as being ready to now be used for sending commands to
 * devices.
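 *
 * A typical invocation, per the argument notes above (a sketch only;
 * 'chan' and 'tgt' are caller-supplied values):
 *
 *	if (isp_control(isp, ISPCTL_RESET_DEV, chan, tgt) != 0)
 *		isp_prt(isp, ISP_LOGERR, "target %d reset failed", tgt);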
 */
typedef enum {
	ISPCTL_RESET_BUS,
	ISPCTL_RESET_DEV,
	ISPCTL_ABORT_CMD,
	ISPCTL_UPDATE_PARAMS,
	ISPCTL_FCLINK_TEST,
	ISPCTL_SCAN_FABRIC,
	ISPCTL_SCAN_LOOP,
	ISPCTL_PDB_SYNC,
	ISPCTL_SEND_LIP,
	ISPCTL_GET_NAMES,
	ISPCTL_RUN_MBOXCMD,
	ISPCTL_GET_PDB,
	ISPCTL_PLOGX,
	ISPCTL_CHANGE_ROLE
} ispctl_t;
int isp_control(ispsoftc_t *, ispctl_t, ...);

/*
 * Platform Dependent to Internal to External Control Function
 */

typedef enum {
	ISPASYNC_NEW_TGT_PARAMS,	/* SPI New Target Parameters */
	ISPASYNC_BUS_RESET,		/* All Bus Was Reset */
	ISPASYNC_LOOP_DOWN,		/* FC Loop Down */
	ISPASYNC_LOOP_UP,		/* FC Loop Up */
	ISPASYNC_LIP,			/* FC LIP Received */
	ISPASYNC_LOOP_RESET,		/* FC Loop Reset Received */
	ISPASYNC_CHANGE_NOTIFY,		/* FC Change Notification */
	ISPASYNC_DEV_ARRIVED,		/* FC Device Arrived */
	ISPASYNC_DEV_CHANGED,		/* FC Device Changed */
	ISPASYNC_DEV_STAYED,		/* FC Device Stayed */
	ISPASYNC_DEV_GONE,		/* FC Device Departure */
	ISPASYNC_TARGET_NOTIFY,		/* All target async notification */
	ISPASYNC_TARGET_NOTIFY_ACK,	/* All target notify ack required */
	ISPASYNC_TARGET_ACTION,		/* All target action requested */
	ISPASYNC_FW_CRASH,		/* All Firmware has crashed */
	ISPASYNC_FW_RESTARTED		/* All Firmware has been restarted */
} ispasync_t;
void isp_async(ispsoftc_t *, ispasync_t, ...);

#define	ISPASYNC_CHANGE_PDB	0
#define	ISPASYNC_CHANGE_SNS	1
#define	ISPASYNC_CHANGE_OTHER	2

/*
 * Platform Independent Error Printout
 */
void isp_prt_endcmd(ispsoftc_t *, XS_T *);

/*
 * Platform Dependent Error and Debug Printout
 *
 * Two required functions for each platform must be provided:
 *
 *    void isp_prt(ispsoftc_t *, int level, const char *, ...)
 *    void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...)
 *
 * but due to compiler differences on different platforms this won't be
 * formally defined here. Instead, they go in each platform definition file.
 */

#define	ISP_LOGALL	0x0	/* log always */
#define	ISP_LOGCONFIG	0x1	/* log configuration messages */
#define	ISP_LOGINFO	0x2	/* log informational messages */
#define	ISP_LOGWARN	0x4	/* log warning messages */
#define	ISP_LOGERR	0x8	/* log error messages */
#define	ISP_LOGDEBUG0	0x10	/* log simple debug messages */
#define	ISP_LOGDEBUG1	0x20	/* log intermediate debug messages */
#define	ISP_LOGDEBUG2	0x40	/* log most debug messages */
#define	ISP_LOGDEBUG3	0x80	/* log high frequency debug messages */
#define	ISP_LOG_SANCFG	0x100	/* log SAN configuration */
#define	ISP_LOG_CWARN	0x200	/* log SCSI command "warnings" (e.g., check conditions) */
#define	ISP_LOG_WARN1	0x400	/* log WARNS we might be interested at some time */
#define	ISP_LOGTINFO	0x1000	/* log informational messages (target mode) */
#define	ISP_LOGTDEBUG0	0x2000	/* log simple debug messages (target mode) */
#define	ISP_LOGTDEBUG1	0x4000	/* log intermediate debug messages (target) */
#define	ISP_LOGTDEBUG2	0x8000	/* log all debug messages (target) */

/*
 * Each Platform provides its own isposinfo substructure of the ispsoftc
 * defined above.
 *
 * Each platform must also provide the following macros/defines:
 *
 *
 *	ISP_FC_SCRLEN				FC scratch area DMA length
 *
 *	ISP_MEMZERO(dst, len)			platform zeroing function
 *	ISP_MEMCPY(dst, src, count)		platform copying function
 *	ISP_SNPRINTF(buf, bufsize, fmt, ...)	snprintf
 *	ISP_DELAY(usecs)			microsecond spindelay function
 *	ISP_SLEEP(isp, usecs)			microsecond sleep function
 *
 *	ISP_INLINE				___inline or not- depending on how
 *						good your debugger is
 *	ISP_MIN					shorthand for ((a) < (b))? (a) : (b)
 *
 *	NANOTIME_T				nanosecond time type
 *
 *	GET_NANOTIME(NANOTIME_T *)		get current nanotime.
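 *		(On FreeBSD, for example, one would expect this to wrap
 *		something like nanotime(9); the exact definition lives in
 *		each platform's definition file, per the note above.)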
 *
 *	GET_NANOSEC(NANOTIME_T *)		get uint64_t from NANOTIME_T
 *
 *	NANOTIME_SUB(NANOTIME_T *, NANOTIME_T *)
 *						subtract two NANOTIME_T values
 *
 *	MAXISPREQUEST(ispsoftc_t *)		maximum request queue size
 *						for this particular board type
 *
 *	MEMORYBARRIER(ispsoftc_t *, barrier_type, offset, size, chan)
 *
 *		Function/Macro that provides memory synchronization on
 *		various objects so that the ISP's and the system's view
 *		of the same object is consistent.
 *
 *	MBOX_ACQUIRE(ispsoftc_t *)		acquire lock on mailbox regs
 *	MBOX_WAIT_COMPLETE(ispsoftc_t *, mbreg_t *) wait for cmd to be done
 *	MBOX_NOTIFY_COMPLETE(ispsoftc_t *)	notification of mbox cmd done
 *	MBOX_RELEASE(ispsoftc_t *)		release lock on mailbox regs
 *
 *	FC_SCRATCH_ACQUIRE(ispsoftc_t *, chan)	acquire lock on FC scratch area
 *						return -1 if you cannot
 *	FC_SCRATCH_RELEASE(ispsoftc_t *, chan)	release lock on FC scratch area
 *
 *	FCP_NEXT_CRN(ispsoftc_t *, XS_T *, rslt, channel, target, lun)	generate the next command reference number. XS_T * may be null.
 *
 *	SCSI_GOOD	SCSI 'Good' Status
 *	SCSI_CHECK	SCSI 'Check Condition' Status
 *	SCSI_BUSY	SCSI 'Busy' Status
 *	SCSI_QFULL	SCSI 'Queue Full' Status
 *
 *	XS_T		Platform SCSI transaction type (i.e., command for HBA)
 *	XS_DMA_ADDR_T	Platform PCI DMA Address Type
 *	XS_GET_DMA_SEG(..)	Get 32 bit dma segment list value
 *	XS_GET_DMA64_SEG(..)	Get 64 bit dma segment list value
 *	XS_ISP(xs)	gets an instance out of an XS_T
 *	XS_CHANNEL(xs)	gets the channel (bus # for DUALBUS cards) ""
 *	XS_TGT(xs)	gets the target ""
 *	XS_LUN(xs)	gets the lun ""
 *	XS_CDBP(xs)	gets a pointer to the scsi CDB ""
 *	XS_CDBLEN(xs)	gets the CDB's length ""
 *	XS_XFRLEN(xs)	gets the associated data transfer length ""
 *	XS_TIME(xs)	gets the time (in milliseconds) for this command
 *	XS_GET_RESID(xs)	gets the current residual count
 *	XS_SET_RESID(xs, resid)	sets the current residual count
 *	XS_STSP(xs)	gets a pointer to the SCSI status byte ""
 *	XS_SNSP(xs)	gets a pointer to the associated sense data
 *	XS_TOT_SNSLEN(xs)	gets the total length of sense data storage
 *	XS_CUR_SNSLEN(xs)	gets the currently used length of sense data storage
 *	XS_SNSKEY(xs)	dereferences XS_SNSP to get the current stored Sense Key
 *	XS_SNSASC(xs)	dereferences XS_SNSP to get the current stored Additional Sense Code
 *	XS_SNSASCQ(xs)	dereferences XS_SNSP to get the current stored Additional Sense Code Qualifier
 *	XS_TAG_P(xs)	predicate of whether this command should be tagged
 *	XS_TAG_TYPE(xs)	which type of tag to use
 *	XS_SETERR(xs)	set error state
 *
 *		HBA_NOERROR	command has no errors
 *		HBA_BOTCH	hba botched something
 *		HBA_CMDTIMEOUT	command timed out
 *		HBA_SELTIMEOUT	selection timed out (also port logouts for FC)
 *		HBA_TGTBSY	target returned a BUSY status
 *		HBA_BUSRESET	bus reset destroyed command
 *		HBA_ABORTED	command was aborted (by request)
 *		HBA_DATAOVR	a data overrun was detected
 *		HBA_ARQFAIL	Automatic Request Sense failed
 *
 *	XS_ERR(xs)	return current error state
 *	XS_NOERR(xs)	there is no error currently set
 *	XS_INITERR(xs)	initialize error state
 *
 *	XS_SAVE_SENSE(xs, sp, total_len, this_len)	save sense data (total and current amount)
 *
 *	XS_APPEND_SENSE(xs, sp, len)	append more sense data
 *
 *	XS_SENSE_VALID(xs)	indicates whether sense is valid
 *
 *	DEFAULT_FRAMESIZE(ispsoftc_t *)		Default Frame Size
 *	DEFAULT_EXEC_THROTTLE(ispsoftc_t *)	Default Execution Throttle
 *
 *	DEFAULT_ROLE(ispsoftc_t *, int)		Get Default Role for a channel
 *	DEFAULT_IID(ispsoftc_t *, int)		Default SCSI initiator ID
 *	DEFAULT_LOOPID(ispsoftc_t *, int)	Default FC Loop ID
 *
 *		These establish
reasonable defaults for each platform.
 *		These must be available independent of card NVRAM and are
 *		to be used should NVRAM not be readable.
 *
 *	DEFAULT_NODEWWN(ispsoftc_t *, chan)	Default FC Node WWN to use
 *	DEFAULT_PORTWWN(ispsoftc_t *, chan)	Default FC Port WWN to use
 *
 *		These defines are hooks to allow the setting of node and
 *		port WWNs when NVRAM cannot be read or is to be overridden.
 *
 *	ACTIVE_NODEWWN(ispsoftc_t *, chan)	FC Node WWN to use
 *	ACTIVE_PORTWWN(ispsoftc_t *, chan)	FC Port WWN to use
 *
 *		After NVRAM is read, these will be invoked to get the
 *		node and port WWNs that will actually be used for this
 *		channel.
 *
 *
 *	ISP_IOXPUT_8(ispsoftc_t *, uint8_t srcval, uint8_t *dstptr)
 *	ISP_IOXPUT_16(ispsoftc_t *, uint16_t srcval, uint16_t *dstptr)
 *	ISP_IOXPUT_32(ispsoftc_t *, uint32_t srcval, uint32_t *dstptr)
 *
 *	ISP_IOXGET_8(ispsoftc_t *, uint8_t *srcptr, uint8_t dstrval)
 *	ISP_IOXGET_16(ispsoftc_t *, uint16_t *srcptr, uint16_t dstrval)
 *	ISP_IOXGET_32(ispsoftc_t *, uint32_t *srcptr, uint32_t dstrval)
 *
 *	ISP_SWIZZLE_NVRAM_WORD(ispsoftc_t *, uint16_t *)
 *	ISP_SWIZZLE_NVRAM_LONG(ispsoftc_t *, uint32_t *)
 *	ISP_SWAP16(ispsoftc_t *, uint16_t srcval)
 *	ISP_SWAP32(ispsoftc_t *, uint32_t srcval)
 */

#ifdef	ISP_TARGET_MODE
/*
 * The functions below are for the publicly available
 * target mode functions that are internal to the Qlogic driver.
 */

/*
 * This function handles new response queue entry appropriate for target mode.
 */
int isp_target_notify(ispsoftc_t *, void *, uint32_t *);

/*
 * This function externalizes the ability to acknowledge an Immediate Notify request.
 */
int isp_notify_ack(ispsoftc_t *, void *);

/*
 * This function externalizes acknowledging (success/fail) an ABTS frame
 */
int isp_acknak_abts(ispsoftc_t *, void *, int);

/*
 * General request queue 'put' routine for target mode entries.
 */
int isp_target_put_entry(ispsoftc_t *isp, void *);

/*
 * General routine to put back an ATIO entry-
 * used for replenishing f/w resource counts.
 * The argument is a pointer to a source ATIO
 * or ATIO2.
 */
int isp_target_put_atio(ispsoftc_t *, void *);

/*
 * General routine to send a final CTIO for a command- used mostly for
 * local responses.
 */
int isp_endcmd(ispsoftc_t *, ...);
#define	ECMD_SVALID	0x100
#define	ECMD_TERMINATE	0x200

/*
 * Handle an asynchronous event
 *
 * Return nonzero if the interrupt that generated this event has been dismissed.
 */
int isp_target_async(ispsoftc_t *, int, int);
#endif
#endif	/* _ISPVAR_H */
Index: projects/release-pkg/sys/dev/urtwn/if_urtwn.c
===================================================================
--- projects/release-pkg/sys/dev/urtwn/if_urtwn.c	(revision 297926)
+++ projects/release-pkg/sys/dev/urtwn/if_urtwn.c	(revision 297927)
@@ -1,5477 +1,5493 @@
/*	$OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $	*/

/*-
 * Copyright (c) 2010 Damien Bergamini
 * Copyright (c) 2014 Kevin Lo
 * Copyright (c) 2015 Andriy Voskoboinyk
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for Realtek RTL8188CE-VAU/RTL8188CUS/RTL8188EU/RTL8188RU/RTL8192CU. */ #include "opt_wlan.h" #include "opt_urtwn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #include #include #include #include "usbdevs.h" #include #include #include #ifdef USB_DEBUG enum { URTWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ URTWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ URTWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ URTWN_DEBUG_RA = 0x00000008, /* f/w rate adaptation setup */ URTWN_DEBUG_USB = 0x00000010, /* usb requests */ URTWN_DEBUG_FIRMWARE = 0x00000020, /* firmware(9) loading debug */ URTWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ URTWN_DEBUG_INTR = 0x00000080, /* ISR */ URTWN_DEBUG_TEMP = 0x00000100, /* temperature calibration */ URTWN_DEBUG_ROM = 0x00000200, /* various ROM info */ URTWN_DEBUG_KEY = 0x00000400, /* crypto keys management */ URTWN_DEBUG_TXPWR = 0x00000800, /* dump Tx power values */ URTWN_DEBUG_RSSI = 0x00001000, /* dump RSSI lookups */ URTWN_DEBUG_ANY = 0xffffffff }; #define URTWN_DPRINTF(_sc, _m, ...) do { \ if ((_sc)->sc_debug & (_m)) \ device_printf((_sc)->sc_dev, __VA_ARGS__); \ } while(0) #else #define URTWN_DPRINTF(_sc, _m, ...) 
do { (void) sc; } while (0) #endif #define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh) static int urtwn_enable_11n = 1; TUNABLE_INT("hw.usb.urtwn.enable_11n", &urtwn_enable_11n); /* various supported device vendors/products */ static const STRUCT_USB_HOST_ID urtwn_devs[] = { #define URTWN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } #define URTWN_RTL8188E_DEV(v,p) \ { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTWN_RTL8188E) } #define URTWN_RTL8188E 1 URTWN_DEV(ABOCOM, RTL8188CU_1), URTWN_DEV(ABOCOM, RTL8188CU_2), URTWN_DEV(ABOCOM, RTL8192CU), URTWN_DEV(ASUS, RTL8192CU), URTWN_DEV(ASUS, USBN10NANO), URTWN_DEV(AZUREWAVE, RTL8188CE_1), URTWN_DEV(AZUREWAVE, RTL8188CE_2), URTWN_DEV(AZUREWAVE, RTL8188CU), URTWN_DEV(BELKIN, F7D2102), URTWN_DEV(BELKIN, RTL8188CU), URTWN_DEV(BELKIN, RTL8192CU), URTWN_DEV(CHICONY, RTL8188CUS_1), URTWN_DEV(CHICONY, RTL8188CUS_2), URTWN_DEV(CHICONY, RTL8188CUS_3), URTWN_DEV(CHICONY, RTL8188CUS_4), URTWN_DEV(CHICONY, RTL8188CUS_5), URTWN_DEV(COREGA, RTL8192CU), URTWN_DEV(DLINK, RTL8188CU), URTWN_DEV(DLINK, RTL8192CU_1), URTWN_DEV(DLINK, RTL8192CU_2), URTWN_DEV(DLINK, RTL8192CU_3), URTWN_DEV(DLINK, DWA131B), URTWN_DEV(EDIMAX, EW7811UN), URTWN_DEV(EDIMAX, RTL8192CU), URTWN_DEV(FEIXUN, RTL8188CU), URTWN_DEV(FEIXUN, RTL8192CU), URTWN_DEV(GUILLEMOT, HWNUP150), URTWN_DEV(HAWKING, RTL8192CU), URTWN_DEV(HP3, RTL8188CU), URTWN_DEV(NETGEAR, WNA1000M), URTWN_DEV(NETGEAR, RTL8192CU), URTWN_DEV(NETGEAR4, RTL8188CU), URTWN_DEV(NOVATECH, RTL8188CU), URTWN_DEV(PLANEX2, RTL8188CU_1), URTWN_DEV(PLANEX2, RTL8188CU_2), URTWN_DEV(PLANEX2, RTL8188CU_3), URTWN_DEV(PLANEX2, RTL8188CU_4), URTWN_DEV(PLANEX2, RTL8188CUS), URTWN_DEV(PLANEX2, RTL8192CU), URTWN_DEV(REALTEK, RTL8188CE_0), URTWN_DEV(REALTEK, RTL8188CE_1), URTWN_DEV(REALTEK, RTL8188CTV), URTWN_DEV(REALTEK, RTL8188CU_0), URTWN_DEV(REALTEK, RTL8188CU_1), URTWN_DEV(REALTEK, RTL8188CU_2), URTWN_DEV(REALTEK, RTL8188CU_3), URTWN_DEV(REALTEK, RTL8188CU_COMBO), URTWN_DEV(REALTEK, RTL8188CUS), URTWN_DEV(REALTEK, RTL8188RU_1), URTWN_DEV(REALTEK, RTL8188RU_2), URTWN_DEV(REALTEK, RTL8188RU_3), URTWN_DEV(REALTEK, RTL8191CU), URTWN_DEV(REALTEK, RTL8192CE), URTWN_DEV(REALTEK, RTL8192CU), URTWN_DEV(SITECOMEU, RTL8188CU_1), URTWN_DEV(SITECOMEU, RTL8188CU_2), URTWN_DEV(SITECOMEU, RTL8192CU), URTWN_DEV(TRENDNET, RTL8188CU), URTWN_DEV(TRENDNET, RTL8192CU), URTWN_DEV(ZYXEL, RTL8192CU), /* URTWN_RTL8188E */ URTWN_RTL8188E_DEV(ABOCOM, RTL8188EU), URTWN_RTL8188E_DEV(DLINK, DWA123D1), URTWN_RTL8188E_DEV(DLINK, DWA125D1), URTWN_RTL8188E_DEV(ELECOM, WDC150SU2M), URTWN_RTL8188E_DEV(REALTEK, RTL8188ETV), URTWN_RTL8188E_DEV(REALTEK, RTL8188EU), #undef URTWN_RTL8188E_DEV #undef URTWN_DEV }; static device_probe_t urtwn_match; static device_attach_t urtwn_attach; static device_detach_t urtwn_detach; static usb_callback_t urtwn_bulk_tx_callback; static usb_callback_t urtwn_bulk_rx_callback; static void urtwn_sysctlattach(struct urtwn_softc *); static void urtwn_drain_mbufq(struct urtwn_softc *); static usb_error_t urtwn_do_request(struct urtwn_softc *, struct usb_device_request *, void *); static struct ieee80211vap *urtwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void urtwn_vap_delete(struct ieee80211vap *); static struct mbuf * urtwn_rx_copy_to_mbuf(struct urtwn_softc *, struct r92c_rx_stat *, int); static struct mbuf * urtwn_report_intr(struct usb_xfer *, struct urtwn_data *); static struct mbuf * urtwn_rxeof(struct 
urtwn_softc *, uint8_t *, int); static void urtwn_r88e_ratectl_tx_complete(struct urtwn_softc *, void *); static struct ieee80211_node *urtwn_rx_frame(struct urtwn_softc *, struct mbuf *, int8_t *); static void urtwn_txeof(struct urtwn_softc *, struct urtwn_data *, int); static int urtwn_alloc_list(struct urtwn_softc *, struct urtwn_data[], int, int); static int urtwn_alloc_rx_list(struct urtwn_softc *); static int urtwn_alloc_tx_list(struct urtwn_softc *); static void urtwn_free_list(struct urtwn_softc *, struct urtwn_data data[], int); static void urtwn_free_rx_list(struct urtwn_softc *); static void urtwn_free_tx_list(struct urtwn_softc *); static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *); static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *); static usb_error_t urtwn_write_region_1(struct urtwn_softc *, uint16_t, uint8_t *, int); static usb_error_t urtwn_write_1(struct urtwn_softc *, uint16_t, uint8_t); static usb_error_t urtwn_write_2(struct urtwn_softc *, uint16_t, uint16_t); static usb_error_t urtwn_write_4(struct urtwn_softc *, uint16_t, uint32_t); static usb_error_t urtwn_read_region_1(struct urtwn_softc *, uint16_t, uint8_t *, int); static uint8_t urtwn_read_1(struct urtwn_softc *, uint16_t); static uint16_t urtwn_read_2(struct urtwn_softc *, uint16_t); static uint32_t urtwn_read_4(struct urtwn_softc *, uint16_t); static int urtwn_fw_cmd(struct urtwn_softc *, uint8_t, const void *, int); static void urtwn_cmdq_cb(void *, int); static int urtwn_cmd_sleepable(struct urtwn_softc *, const void *, size_t, CMD_FUNC_PROTO); static void urtwn_r92c_rf_write(struct urtwn_softc *, int, uint8_t, uint32_t); static void urtwn_r88e_rf_write(struct urtwn_softc *, int, uint8_t, uint32_t); static uint32_t urtwn_rf_read(struct urtwn_softc *, int, uint8_t); static int urtwn_llt_write(struct urtwn_softc *, uint32_t, uint32_t); static int urtwn_efuse_read_next(struct urtwn_softc *, uint8_t *); static int urtwn_efuse_read_data(struct urtwn_softc *, uint8_t *, uint8_t, uint8_t); #ifdef USB_DEBUG static void urtwn_dump_rom_contents(struct urtwn_softc *, uint8_t *, uint16_t); #endif static int urtwn_efuse_read(struct urtwn_softc *, uint8_t *, uint16_t); static int urtwn_efuse_switch_power(struct urtwn_softc *); static int urtwn_read_chipid(struct urtwn_softc *); static int urtwn_read_rom(struct urtwn_softc *); static int urtwn_r88e_read_rom(struct urtwn_softc *); static int urtwn_ra_init(struct urtwn_softc *); static void urtwn_init_beacon(struct urtwn_softc *, struct urtwn_vap *); static int urtwn_setup_beacon(struct urtwn_softc *, struct ieee80211_node *); static void urtwn_update_beacon(struct ieee80211vap *, int); static int urtwn_tx_beacon(struct urtwn_softc *sc, struct urtwn_vap *); static int urtwn_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static void urtwn_key_set_cb(struct urtwn_softc *, union sec_param *); static void urtwn_key_del_cb(struct urtwn_softc *, union sec_param *); static int urtwn_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int urtwn_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static void urtwn_tsf_task_adhoc(void *, int); static void urtwn_tsf_sync_enable(struct urtwn_softc *, struct ieee80211vap *); static void urtwn_get_tsf(struct urtwn_softc *, uint64_t *); static void urtwn_set_led(struct urtwn_softc *, int, int); static void urtwn_set_mode(struct urtwn_softc *, uint8_t); static void urtwn_ibss_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const 
struct ieee80211_rx_stats *, int, int); static int urtwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void urtwn_calib_to(void *); static void urtwn_calib_cb(struct urtwn_softc *, union sec_param *); static void urtwn_watchdog(void *); static void urtwn_update_avgrssi(struct urtwn_softc *, int, int8_t); static int8_t urtwn_get_rssi(struct urtwn_softc *, int, void *); static int8_t urtwn_r88e_get_rssi(struct urtwn_softc *, int, void *); static int urtwn_tx_data(struct urtwn_softc *, struct ieee80211_node *, struct mbuf *, struct urtwn_data *); static int urtwn_tx_raw(struct urtwn_softc *, struct ieee80211_node *, struct mbuf *, struct urtwn_data *, const struct ieee80211_bpf_params *); static void urtwn_tx_start(struct urtwn_softc *, struct mbuf *, uint8_t, struct urtwn_data *); static int urtwn_transmit(struct ieee80211com *, struct mbuf *); static void urtwn_start(struct urtwn_softc *); static void urtwn_parent(struct ieee80211com *); static int urtwn_r92c_power_on(struct urtwn_softc *); static int urtwn_r88e_power_on(struct urtwn_softc *); static void urtwn_r92c_power_off(struct urtwn_softc *); static void urtwn_r88e_power_off(struct urtwn_softc *); static int urtwn_llt_init(struct urtwn_softc *); #ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *); static void urtwn_r88e_fw_reset(struct urtwn_softc *); static int urtwn_fw_loadpage(struct urtwn_softc *, int, const uint8_t *, int); static int urtwn_load_firmware(struct urtwn_softc *); #endif static int urtwn_dma_init(struct urtwn_softc *); static int urtwn_mac_init(struct urtwn_softc *); static void urtwn_bb_init(struct urtwn_softc *); static void urtwn_rf_init(struct urtwn_softc *); static void urtwn_cam_init(struct urtwn_softc *); static int urtwn_cam_write(struct urtwn_softc *, uint32_t, uint32_t); static void urtwn_pa_bias_init(struct urtwn_softc *); static void urtwn_rxfilter_init(struct urtwn_softc *); static void urtwn_edca_init(struct urtwn_softc *); static void urtwn_write_txpower(struct urtwn_softc *, int, uint16_t[]); static void urtwn_get_txpower(struct urtwn_softc *, int, struct ieee80211_channel *, struct ieee80211_channel *, uint16_t[]); static void urtwn_r88e_get_txpower(struct urtwn_softc *, int, struct ieee80211_channel *, struct ieee80211_channel *, uint16_t[]); static void urtwn_set_txpower(struct urtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); static void urtwn_set_rx_bssid_all(struct urtwn_softc *, int); static void urtwn_set_gain(struct urtwn_softc *, uint8_t); static void urtwn_scan_start(struct ieee80211com *); static void urtwn_scan_end(struct ieee80211com *); static void urtwn_set_channel(struct ieee80211com *); static int urtwn_wme_update(struct ieee80211com *); static void urtwn_update_slot(struct ieee80211com *); static void urtwn_update_slot_cb(struct urtwn_softc *, union sec_param *); static void urtwn_update_aifs(struct urtwn_softc *, uint8_t); static void urtwn_set_promisc(struct urtwn_softc *); static void urtwn_update_promisc(struct ieee80211com *); static void urtwn_update_mcast(struct ieee80211com *); -static struct ieee80211_node *urtwn_r88e_node_alloc(struct ieee80211vap *, +static struct ieee80211_node *urtwn_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); -static void urtwn_r88e_newassoc(struct ieee80211_node *, int); -static void urtwn_r88e_node_free(struct ieee80211_node *); +static void urtwn_newassoc(struct ieee80211_node *, int); +static void urtwn_node_free(struct ieee80211_node *); static void 
urtwn_set_chan(struct urtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); static void urtwn_iq_calib(struct urtwn_softc *); static void urtwn_lc_calib(struct urtwn_softc *); static void urtwn_temp_calib(struct urtwn_softc *); static int urtwn_init(struct urtwn_softc *); static void urtwn_stop(struct urtwn_softc *); static void urtwn_abort_xfers(struct urtwn_softc *); static int urtwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void urtwn_ms_delay(struct urtwn_softc *); /* Aliases. */ #define urtwn_bb_write urtwn_write_4 #define urtwn_bb_read urtwn_read_4 static const struct usb_config urtwn_config[URTWN_N_TRANSFER] = { [URTWN_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = URTWN_RXBUFSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = urtwn_bulk_rx_callback, }, [URTWN_BULK_TX_BE] = { .type = UE_BULK, .endpoint = 0x03, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_BK] = { .type = UE_BULK, .endpoint = 0x03, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1, }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_VI] = { .type = UE_BULK, .endpoint = 0x02, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, [URTWN_BULK_TX_VO] = { .type = UE_BULK, .endpoint = 0x02, .direction = UE_DIR_OUT, .bufsize = URTWN_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = urtwn_bulk_tx_callback, .timeout = URTWN_TX_TIMEOUT, /* ms */ }, }; static const struct wme_to_queue { uint16_t reg; uint8_t qid; } wme2queue[WME_NUM_AC] = { { R92C_EDCA_BE_PARAM, URTWN_BULK_TX_BE}, { R92C_EDCA_BK_PARAM, URTWN_BULK_TX_BK}, { R92C_EDCA_VI_PARAM, URTWN_BULK_TX_VI}, { R92C_EDCA_VO_PARAM, URTWN_BULK_TX_VO} }; static int urtwn_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != URTWN_CONFIG_INDEX) return (ENXIO); if (uaa->info.bIfaceIndex != URTWN_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(urtwn_devs, sizeof(urtwn_devs), uaa)); } static void urtwn_update_chw(struct ieee80211com *ic) { } static int urtwn_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { /* We're driving this ourselves (eventually); don't involve net80211 */ return (0); } static int urtwn_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct urtwn_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)]; int error; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; if (USB_GET_DRIVER_INFO(uaa) == URTWN_RTL8188E) sc->chip |= URTWN_CHIP_88E; #ifdef USB_DEBUG int debug; if (resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "debug", &debug) == 0) sc->sc_debug = debug; #endif mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); URTWN_CMDQ_LOCK_INIT(sc); URTWN_NT_LOCK_INIT(sc); callout_init(&sc->sc_calib_to, 0); callout_init(&sc->sc_watchdog_ch, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); 
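/*
 * NB: the mutexes, callouts and snd queue above must exist before
 * usbd_transfer_setup() below, which binds the bulk RX/TX callbacks
 * to sc_mtx; the error path tears them down again via urtwn_detach().
 */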
sc->sc_iface_index = URTWN_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &sc->sc_iface_index, sc->sc_xfer, urtwn_config, URTWN_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } URTWN_LOCK(sc); error = urtwn_read_chipid(sc); if (error) { device_printf(sc->sc_dev, "unsupported test chip\n"); URTWN_UNLOCK(sc); goto detach; } /* Determine number of Tx/Rx chains. */ if (sc->chip & URTWN_CHIP_92C) { sc->ntxchains = (sc->chip & URTWN_CHIP_92C_1T2R) ? 1 : 2; sc->nrxchains = 2; } else { sc->ntxchains = 1; sc->nrxchains = 1; } if (sc->chip & URTWN_CHIP_88E) error = urtwn_r88e_read_rom(sc); else error = urtwn_read_rom(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot read rom, error %d\n", __func__, error); URTWN_UNLOCK(sc); goto detach; } device_printf(sc->sc_dev, "MAC/BB RTL%s, RF 6052 %dT%dR\n", (sc->chip & URTWN_CHIP_92C) ? "8192CU" : (sc->chip & URTWN_CHIP_88E) ? "8188EU" : (sc->board_type == R92C_BOARD_TYPE_HIGHPA) ? "8188RU" : (sc->board_type == R92C_BOARD_TYPE_MINICARD) ? "8188CE-VAU" : "8188CUS", sc->ntxchains, sc->nrxchains); URTWN_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_IBSS /* adhoc mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_SWAMSDUTX /* Do software A-MSDU TX */ | IEEE80211_C_FF /* Atheros fast-frames */ ; ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_AES_CCM; /* Assume they're all 11n capable for now */ if (urtwn_enable_11n) { device_printf(self, "enabling 11n\n"); ic->ic_htcaps = IEEE80211_HTC_HT | #if 0 IEEE80211_HTC_AMPDU | #endif IEEE80211_HTC_AMSDU | IEEE80211_HTCAP_MAXAMSDU_3839 | IEEE80211_HTCAP_SMPS_OFF; /* no HT40 just yet */ // ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40; /* XXX TODO: verify chains versus streams for urtwn */ ic->ic_txstream = sc->ntxchains; ic->ic_rxstream = sc->nrxchains; } memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (urtwn_enable_11n) setbit(bands, IEEE80211_MODE_11NG); ieee80211_init_channels(ic, NULL, bands); ieee80211_ifattach(ic); ic->ic_raw_xmit = urtwn_raw_xmit; ic->ic_scan_start = urtwn_scan_start; ic->ic_scan_end = urtwn_scan_end; ic->ic_set_channel = urtwn_set_channel; ic->ic_transmit = urtwn_transmit; ic->ic_parent = urtwn_parent; ic->ic_vap_create = urtwn_vap_create; ic->ic_vap_delete = urtwn_vap_delete; ic->ic_wme.wme_update = urtwn_wme_update; ic->ic_updateslot = urtwn_update_slot; ic->ic_update_promisc = urtwn_update_promisc; ic->ic_update_mcast = urtwn_update_mcast; if (sc->chip & URTWN_CHIP_88E) { - ic->ic_node_alloc = urtwn_r88e_node_alloc; - ic->ic_newassoc = urtwn_r88e_newassoc; + ic->ic_node_alloc = urtwn_node_alloc; + ic->ic_newassoc = urtwn_newassoc; sc->sc_node_free = ic->ic_node_free; - ic->ic_node_free = urtwn_r88e_node_free; + ic->ic_node_free = urtwn_node_free; } ic->ic_update_chw = urtwn_update_chw; ic->ic_ampdu_enable = urtwn_ampdu_enable; ieee80211_radiotap_attach(ic, 
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), URTWN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), URTWN_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, urtwn_cmdq_cb, sc); urtwn_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); return (0); detach: urtwn_detach(self); return (ENXIO); /* failure */ } static void urtwn_sysctlattach(struct urtwn_softc *sc) { #ifdef USB_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_U32(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, "control debugging printfs"); #endif } static int urtwn_detach(device_t self) { struct urtwn_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; unsigned int x; /* Prevent further ioctls. */ URTWN_LOCK(sc); sc->sc_flags |= URTWN_DETACHED; URTWN_UNLOCK(sc); urtwn_stop(sc); callout_drain(&sc->sc_watchdog_ch); callout_drain(&sc->sc_calib_to); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, URTWN_N_TRANSFER); /* Prevent further allocations from RX/TX data lists. */ URTWN_LOCK(sc); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); URTWN_UNLOCK(sc); /* drain USB transfers */ for (x = 0; x != URTWN_N_TRANSFER; x++) usbd_transfer_drain(sc->sc_xfer[x]); /* Free data buffers. */ URTWN_LOCK(sc); urtwn_free_tx_list(sc); urtwn_free_rx_list(sc); URTWN_UNLOCK(sc); if (ic->ic_softc == sc) { ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_ifdetach(ic); } URTWN_NT_LOCK_DESTROY(sc); URTWN_CMDQ_LOCK_DESTROY(sc); mtx_destroy(&sc->sc_mtx); return (0); } static void urtwn_drain_mbufq(struct urtwn_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; URTWN_ASSERT_LOCKED(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; ieee80211_free_node(ni); m_freem(m); } } static usb_error_t urtwn_do_request(struct urtwn_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; URTWN_ASSERT_LOCKED(sc); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; URTWN_DPRINTF(sc, URTWN_DEBUG_USB, "%s: control request failed, %s (retries left: %d)\n", __func__, usbd_errstr(err), ntries); usb_pause_mtx(&sc->sc_mtx, hz / 100); } return (err); } static struct ieee80211vap * urtwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct urtwn_softc *sc = ic->ic_softc; struct urtwn_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = malloc(sizeof(struct urtwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_IBSS) urtwn_init_beacon(sc, uvp); /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = urtwn_newstate; vap->iv_update_beacon = urtwn_update_beacon; vap->iv_key_alloc = urtwn_key_alloc; vap->iv_key_set = urtwn_key_set; vap->iv_key_delete = urtwn_key_delete; if 
(opmode == IEEE80211_M_IBSS) { uvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = urtwn_ibss_recv_mgmt; TASK_INIT(&uvp->tsf_task_adhoc, 0, urtwn_tsf_task_adhoc, vap); } if (URTWN_CHIP_HAS_RATECTL(sc)) ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return (vap); } static void urtwn_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct urtwn_softc *sc = ic->ic_softc; struct urtwn_vap *uvp = URTWN_VAP(vap); if (uvp->bcn_mbuf != NULL) m_freem(uvp->bcn_mbuf); if (vap->iv_opmode == IEEE80211_M_IBSS) ieee80211_draintask(ic, &uvp->tsf_task_adhoc); if (URTWN_CHIP_HAS_RATECTL(sc)) ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static struct mbuf * urtwn_rx_copy_to_mbuf(struct urtwn_softc *sc, struct r92c_rx_stat *stat, int totlen) { struct ieee80211com *ic = &sc->sc_ic; struct mbuf *m; uint32_t rxdw0; int pktlen; /* * don't pass packets to the ieee80211 framework if the driver isn't * RUNNING. */ if (!(sc->sc_flags & URTWN_RUNNING)) return (NULL); rxdw0 = le32toh(stat->rxdw0); if (rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR)) { /* * This should not happen since we setup our Rx filter * to not receive these frames. */ URTWN_DPRINTF(sc, URTWN_DEBUG_RECV, "%s: RX flags error (%s)\n", __func__, rxdw0 & R92C_RXDW0_CRCERR ? "CRC" : "ICV"); goto fail; } pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); if (pktlen < sizeof(struct ieee80211_frame_ack)) { URTWN_DPRINTF(sc, URTWN_DEBUG_RECV, "%s: frame is too short: %d\n", __func__, pktlen); goto fail; } if (__predict_false(totlen > MCLBYTES)) { /* convert to m_getjcl if this happens */ device_printf(sc->sc_dev, "%s: frame too long: %d (%d)\n", __func__, pktlen, totlen); goto fail; } m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m == NULL)) { device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n", __func__); goto fail; } /* Finalize mbuf. */ memcpy(mtod(m, uint8_t *), (uint8_t *)stat, totlen); m->m_pkthdr.len = m->m_len = totlen; return (m); fail: counter_u64_add(ic->ic_ierrors, 1); return (NULL); } static struct mbuf * urtwn_report_intr(struct usb_xfer *xfer, struct urtwn_data *data) { struct urtwn_softc *sc = data->sc; struct ieee80211com *ic = &sc->sc_ic; struct r92c_rx_stat *stat; uint8_t *buf; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); if (len < sizeof(*stat)) { counter_u64_add(ic->ic_ierrors, 1); return (NULL); } buf = data->buf; stat = (struct r92c_rx_stat *)buf; /* * For 88E chips we can tie the FF flushing here; * this is where we do know exactly how deep the * transmit queue is. * * But it won't work for R92 chips, so we can't * take the easy way out. */ if (sc->chip & URTWN_CHIP_88E) { int report_sel = MS(le32toh(stat->rxdw3), R88E_RXDW3_RPT); switch (report_sel) { case R88E_RXDW3_RPT_RX: return (urtwn_rxeof(sc, buf, len)); case R88E_RXDW3_RPT_TX1: urtwn_r88e_ratectl_tx_complete(sc, &stat[1]); break; default: URTWN_DPRINTF(sc, URTWN_DEBUG_INTR, "%s: case %d was not handled\n", __func__, report_sel); break; } } else return (urtwn_rxeof(sc, buf, len)); return (NULL); } static struct mbuf * urtwn_rxeof(struct urtwn_softc *sc, uint8_t *buf, int len) { struct r92c_rx_stat *stat; struct mbuf *m, *m0 = NULL, *prevm = NULL; uint32_t rxdw0; int totlen, pktlen, infosz, npkts; /* Get the number of encapsulated frames. 
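 * A single bulk-in transfer may carry several frames back to back;
 * the PKTCNT field of rxdw2 gives the count and every descriptor +
 * frame chunk is padded to a 128-byte boundary (see below).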
*/ stat = (struct r92c_rx_stat *)buf; npkts = MS(le32toh(stat->rxdw2), R92C_RXDW2_PKTCNT); URTWN_DPRINTF(sc, URTWN_DEBUG_RECV, "%s: Rx %d frames in one chunk\n", __func__, npkts); /* Process all of them. */ while (npkts-- > 0) { if (len < sizeof(*stat)) break; stat = (struct r92c_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN); if (pktlen == 0) break; infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; /* Make sure everything fits in xfer. */ totlen = sizeof(*stat) + infosz + pktlen; if (totlen > len) break; m = urtwn_rx_copy_to_mbuf(sc, stat, totlen); if (m0 == NULL) m0 = m; if (prevm == NULL) prevm = m; else { prevm->m_next = m; prevm = m; } /* Next chunk is 128-byte aligned. */ totlen = (totlen + 127) & ~127; buf += totlen; len -= totlen; } return (m0); } static void urtwn_r88e_ratectl_tx_complete(struct urtwn_softc *sc, void *arg) { struct r88e_tx_rpt_ccx *rpt = arg; struct ieee80211vap *vap; struct ieee80211_node *ni; uint8_t macid; int ntries; macid = MS(rpt->rptb1, R88E_RPTB1_MACID); ntries = MS(rpt->rptb2, R88E_RPTB2_RETRY_CNT); URTWN_NT_LOCK(sc); ni = sc->node_list[macid]; if (ni != NULL) { vap = ni->ni_vap; URTWN_DPRINTF(sc, URTWN_DEBUG_INTR, "%s: frame for macid %d was" "%s sent (%d retries)\n", __func__, macid, (rpt->rptb1 & R88E_RPTB1_PKT_OK) ? "" : " not", ntries); if (rpt->rptb1 & R88E_RPTB1_PKT_OK) { ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_SUCCESS, &ntries, NULL); } else { ieee80211_ratectl_tx_complete(vap, ni, IEEE80211_RATECTL_TX_FAILURE, &ntries, NULL); } } else { URTWN_DPRINTF(sc, URTWN_DEBUG_INTR, "%s: macid %d, ni is NULL\n", __func__, macid); } URTWN_NT_UNLOCK(sc); } static struct ieee80211_node * urtwn_rx_frame(struct urtwn_softc *sc, struct mbuf *m, int8_t *rssi_p) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame_min *wh; struct r92c_rx_stat *stat; uint32_t rxdw0, rxdw3; uint8_t rate, cipher; - int8_t rssi = URTWN_NOISE_FLOOR + 1; + int8_t rssi = -127; int infosz; stat = mtod(m, struct r92c_rx_stat *); rxdw0 = le32toh(stat->rxdw0); rxdw3 = le32toh(stat->rxdw3); rate = MS(rxdw3, R92C_RXDW3_RATE); cipher = MS(rxdw0, R92C_RXDW0_CIPHER); infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8; /* Get RSSI from PHY status descriptor if present. */ if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) { if (sc->chip & URTWN_CHIP_88E) rssi = urtwn_r88e_get_rssi(sc, rate, &stat[1]); else rssi = urtwn_get_rssi(sc, rate, &stat[1]); + URTWN_DPRINTF(sc, URTWN_DEBUG_RSSI, "%s: rssi=%d\n", __func__, rssi); /* Update our average RSSI. */ urtwn_update_avgrssi(sc, rate, rssi); } if (ieee80211_radiotap_active(ic)) { struct urtwn_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; urtwn_get_tsf(sc, &tap->wr_tsft); if (__predict_false(le32toh((uint32_t)tap->wr_tsft) < le32toh(stat->rxdw5))) { tap->wr_tsft = le32toh(tap->wr_tsft >> 32) - 1; tap->wr_tsft = (uint64_t)htole32(tap->wr_tsft) << 32; } else tap->wr_tsft &= 0xffffffff00000000; tap->wr_tsft += stat->rxdw5; /* XXX 20/40? */ /* XXX shortgi? */ /* Map HW rate index to 802.11 rate. */ if (!(rxdw3 & R92C_RXDW3_HT)) { tap->wr_rate = ridx2rate[rate]; } else if (rate >= 12) { /* MCS0~15. */ /* Bit 7 set means HT MCS instead of rate. */ tap->wr_rate = 0x80 | (rate - 12); } + + /* XXX TODO: this isn't right; should use the last good RSSI */ tap->wr_dbm_antsignal = rssi; tap->wr_dbm_antnoise = URTWN_NOISE_FLOOR; } *rssi_p = rssi; /* Drop descriptor. 
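 * Strip the RX descriptor plus the optional PHY status (infosz
 * bytes) so that the mbuf now begins at the 802.11 header.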
 */
	m_adj(m, sizeof(*stat) + infosz);
	wh = mtod(m, struct ieee80211_frame_min *);

	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
	    cipher != R92C_CAM_ALGO_NONE) {
		m->m_flags |= M_WEP;
	}

	if (m->m_len >= sizeof(*wh))
		return (ieee80211_find_rxnode(ic, wh));

	return (NULL);
}

static void
urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct urtwn_softc *sc = usbd_xfer_softc(xfer);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct mbuf *m = NULL, *next;
	struct urtwn_data *data;
	int8_t nf, rssi;

	URTWN_ASSERT_LOCKED(sc);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		data = STAILQ_FIRST(&sc->sc_rx_active);
		if (data == NULL)
			goto tr_setup;
		STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
		m = urtwn_report_intr(xfer, data);
		STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		data = STAILQ_FIRST(&sc->sc_rx_inactive);
		if (data == NULL) {
			KASSERT(m == NULL, ("mbuf isn't NULL"));
			goto finish;
		}
		STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
		STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
		usbd_xfer_set_frame_data(xfer, 0, data->buf,
		    usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);

		/*
		 * To avoid a LOR we should unlock our private mutex here to
		 * call ieee80211_input(), because we are at the end of a USB
		 * callback, where it is safe to unlock.
		 */
		while (m != NULL) {
			next = m->m_next;
			m->m_next = NULL;

			ni = urtwn_rx_frame(sc, m, &rssi);
+
+			/* Store a global last-good RSSI */
+			if (rssi != -127)
+				sc->last_rssi = rssi;
+
			URTWN_UNLOCK(sc);

			nf = URTWN_NOISE_FLOOR;
			if (ni != NULL) {
+				if (rssi != -127)
+					URTWN_NODE(ni)->last_rssi = rssi;
				if (ni->ni_flags & IEEE80211_NODE_HT)
					m->m_flags |= M_AMPDU;
-				(void)ieee80211_input(ni, m, rssi - nf, nf);
+				(void)ieee80211_input(ni, m,
+				    URTWN_NODE(ni)->last_rssi - nf, nf);
				ieee80211_free_node(ni);
			} else {
-				(void)ieee80211_input_all(ic, m, rssi - nf,
-				    nf);
+				/* Use last good global RSSI */
+				(void)ieee80211_input_all(ic, m,
+				    sc->last_rssi - nf, nf);
			}
			URTWN_LOCK(sc);
			m = next;
		}
		break;
	default:
		/* return it to the inactive queue due to an error. */
		data = STAILQ_FIRST(&sc->sc_rx_active);
		if (data != NULL) {
			STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
			STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
		}
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			counter_u64_add(ic->ic_ierrors, 1);
			goto tr_setup;
		}
		break;
	}
finish:
	/* Finished receive; age anything left on the FF queue by a little bump */
	/*
	 * XXX TODO: just make this a callout timer schedule so we can
	 * flush the FF staging queue if we're approaching idle.
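	 * For now the aging is simply piggybacked onto the end of every
	 * RX completion.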
*/ #ifdef IEEE80211_SUPPORT_SUPERG URTWN_UNLOCK(sc); ieee80211_ff_age_all(ic, 1); URTWN_LOCK(sc); #endif /* Kick-start more transmit in case we stalled */ urtwn_start(sc); } static void urtwn_txeof(struct urtwn_softc *sc, struct urtwn_data *data, int status) { URTWN_ASSERT_LOCKED(sc); if (data->ni != NULL) /* not a beacon frame */ ieee80211_tx_complete(data->ni, data->m, status); if (sc->sc_tx_n_active > 0) sc->sc_tx_n_active--; data->ni = NULL; data->m = NULL; sc->sc_txtimer = 0; STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next); } static int urtwn_alloc_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct urtwn_data *dp = &data[i]; dp->sc = sc; dp->m = NULL; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT); if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } dp->ni = NULL; } return (0); fail: urtwn_free_list(sc, data, ndata); return (error); } static int urtwn_alloc_rx_list(struct urtwn_softc *sc) { int error, i; error = urtwn_alloc_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT, URTWN_RXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < URTWN_RX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int urtwn_alloc_tx_list(struct urtwn_softc *sc) { int error, i; error = urtwn_alloc_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT, URTWN_TXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_active); STAILQ_INIT(&sc->sc_tx_inactive); STAILQ_INIT(&sc->sc_tx_pending); for (i = 0; i < URTWN_TX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); return (0); } static void urtwn_free_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata) { int i; for (i = 0; i < ndata; i++) { struct urtwn_data *dp = &data[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static void urtwn_free_rx_list(struct urtwn_softc *sc) { urtwn_free_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT); } static void urtwn_free_tx_list(struct urtwn_softc *sc) { urtwn_free_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT); } static void urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct urtwn_softc *sc = usbd_xfer_softc(xfer); #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = &sc->sc_ic; #endif struct urtwn_data *data; URTWN_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)){ case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); urtwn_txeof(sc, data, 0); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_tx_pending); if (data == NULL) { URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: empty pending queue\n", __func__); sc->sc_tx_n_active = 0; goto finish; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next); STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); usbd_transfer_submit(xfer); sc->sc_tx_n_active++; break; default: data = STAILQ_FIRST(&sc->sc_tx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next); urtwn_txeof(sc, data, 1); if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } finish: #ifdef IEEE80211_SUPPORT_SUPERG /* * If the TX active queue drops below a certain * threshold, ensure we age fast-frames out so they're * 
transmitted. */ if (sc->sc_tx_n_active <= 1) { /* XXX ew - net80211 should defer this for us! */ /* * Note: this sc_tx_n_active currently tracks * the number of pending transmit submissions * and not the actual depth of the TX frames * pending to the hardware. That means that * we're going to end up with some sub-optimal * aggregation behaviour. */ /* * XXX TODO: just make this a callout timer schedule so we can * flush the FF staging queue if we're approaching idle. */ URTWN_UNLOCK(sc); ieee80211_ff_flush(ic, WME_AC_VO); ieee80211_ff_flush(ic, WME_AC_VI); ieee80211_ff_flush(ic, WME_AC_BE); ieee80211_ff_flush(ic, WME_AC_BK); URTWN_LOCK(sc); } #endif /* Kick-start more transmit */ urtwn_start(sc); } static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *sc) { struct urtwn_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else { URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: out of xmit buffers\n", __func__); } return (bf); } static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *sc) { struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); bf = _urtwn_getbuf(sc); if (bf == NULL) { URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: stop queue\n", __func__); } return (bf); } static usb_error_t urtwn_write_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = R92C_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (urtwn_do_request(sc, &req, buf)); } static usb_error_t urtwn_write_1(struct urtwn_softc *sc, uint16_t addr, uint8_t val) { return (urtwn_write_region_1(sc, addr, &val, sizeof(val))); } static usb_error_t urtwn_write_2(struct urtwn_softc *sc, uint16_t addr, uint16_t val) { val = htole16(val); return (urtwn_write_region_1(sc, addr, (uint8_t *)&val, sizeof(val))); } static usb_error_t urtwn_write_4(struct urtwn_softc *sc, uint16_t addr, uint32_t val) { val = htole32(val); return (urtwn_write_region_1(sc, addr, (uint8_t *)&val, sizeof(val))); } static usb_error_t urtwn_read_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = R92C_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (urtwn_do_request(sc, &req, buf)); } static uint8_t urtwn_read_1(struct urtwn_softc *sc, uint16_t addr) { uint8_t val; if (urtwn_read_region_1(sc, addr, &val, 1) != 0) return (0xff); return (val); } static uint16_t urtwn_read_2(struct urtwn_softc *sc, uint16_t addr) { uint16_t val; if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0) return (0xffff); return (le16toh(val)); } static uint32_t urtwn_read_4(struct urtwn_softc *sc, uint16_t addr) { uint32_t val; if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0) return (0xffffffff); return (le32toh(val)); } static int urtwn_fw_cmd(struct urtwn_softc *sc, uint8_t id, const void *buf, int len) { struct r92c_fw_cmd cmd; usb_error_t error; int ntries; if (!(sc->sc_flags & URTWN_FW_LOADED)) { URTWN_DPRINTF(sc, URTWN_DEBUG_FIRMWARE, "%s: firmware " "was not loaded; command (id %d) will be discarded\n", __func__, id); return (0); } /* Wait for current FW box to be empty. 
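 * R92C_HMETFR keeps one busy bit per H2C mailbox; poll until the
 * bit for the current box (fwcur) clears, giving up after 100
 * iterations.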
*/ for (ntries = 0; ntries < 100; ntries++) { if (!(urtwn_read_1(sc, R92C_HMETFR) & (1 << sc->fwcur))) break; urtwn_ms_delay(sc); } if (ntries == 100) { device_printf(sc->sc_dev, "could not send firmware command\n"); return (ETIMEDOUT); } memset(&cmd, 0, sizeof(cmd)); cmd.id = id; if (len > 3) cmd.id |= R92C_CMD_FLAG_EXT; KASSERT(len <= sizeof(cmd.msg), ("urtwn_fw_cmd\n")); memcpy(cmd.msg, buf, len); /* Write the first word last since that will trigger the FW. */ error = urtwn_write_region_1(sc, R92C_HMEBOX_EXT(sc->fwcur), (uint8_t *)&cmd + 4, 2); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_region_1(sc, R92C_HMEBOX(sc->fwcur), (uint8_t *)&cmd + 0, 4); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); sc->fwcur = (sc->fwcur + 1) % R92C_H2C_NBOX; return (0); } static void urtwn_cmdq_cb(void *arg, int pending) { struct urtwn_softc *sc = arg; struct urtwn_cmdq *item; /* * Device must be powered on (via urtwn_power_on()) * before any command may be sent. */ URTWN_LOCK(sc); if (!(sc->sc_flags & URTWN_RUNNING)) { URTWN_UNLOCK(sc); return; } URTWN_CMDQ_LOCK(sc); while (sc->cmdq[sc->cmdq_first].func != NULL) { item = &sc->cmdq[sc->cmdq_first]; sc->cmdq_first = (sc->cmdq_first + 1) % URTWN_CMDQ_SIZE; URTWN_CMDQ_UNLOCK(sc); item->func(sc, &item->data); URTWN_CMDQ_LOCK(sc); memset(item, 0, sizeof (*item)); } URTWN_CMDQ_UNLOCK(sc); URTWN_UNLOCK(sc); } static int urtwn_cmd_sleepable(struct urtwn_softc *sc, const void *ptr, size_t len, CMD_FUNC_PROTO) { struct ieee80211com *ic = &sc->sc_ic; KASSERT(len <= sizeof(union sec_param), ("buffer overflow")); URTWN_CMDQ_LOCK(sc); if (sc->cmdq[sc->cmdq_last].func != NULL) { device_printf(sc->sc_dev, "%s: cmdq overflow\n", __func__); URTWN_CMDQ_UNLOCK(sc); return (EAGAIN); } if (ptr != NULL) memcpy(&sc->cmdq[sc->cmdq_last].data, ptr, len); sc->cmdq[sc->cmdq_last].func = func; sc->cmdq_last = (sc->cmdq_last + 1) % URTWN_CMDQ_SIZE; URTWN_CMDQ_UNLOCK(sc); ieee80211_runtask(ic, &sc->cmdq_task); return (0); } static __inline void urtwn_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { sc->sc_rf_write(sc, chain, addr, val); } static void urtwn_r92c_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { urtwn_bb_write(sc, R92C_LSSI_PARAM(chain), SM(R92C_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val)); } static void urtwn_r88e_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val) { urtwn_bb_write(sc, R92C_LSSI_PARAM(chain), SM(R88E_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val)); } static uint32_t urtwn_rf_read(struct urtwn_softc *sc, int chain, uint8_t addr) { uint32_t reg[R92C_MAX_CHAINS], val; reg[0] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)); if (chain != 0) reg[chain] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(chain)); urtwn_bb_write(sc, R92C_HSSI_PARAM2(0), reg[0] & ~R92C_HSSI_PARAM2_READ_EDGE); urtwn_ms_delay(sc); urtwn_bb_write(sc, R92C_HSSI_PARAM2(chain), RW(reg[chain], R92C_HSSI_PARAM2_READ_ADDR, addr) | R92C_HSSI_PARAM2_READ_EDGE); urtwn_ms_delay(sc); urtwn_bb_write(sc, R92C_HSSI_PARAM2(0), reg[0] | R92C_HSSI_PARAM2_READ_EDGE); urtwn_ms_delay(sc); if (urtwn_bb_read(sc, R92C_HSSI_PARAM1(chain)) & R92C_HSSI_PARAM1_PI) val = urtwn_bb_read(sc, R92C_HSPI_READBACK(chain)); else val = urtwn_bb_read(sc, R92C_LSSI_READBACK(chain)); return (MS(val, R92C_LSSI_READBACK_DATA)); } static int urtwn_llt_write(struct urtwn_softc *sc, uint32_t addr, uint32_t data) { usb_error_t error; int ntries; error = urtwn_write_4(sc, R92C_LLT_INIT, SM(R92C_LLT_INIT_OP, 
R92C_LLT_INIT_OP_WRITE) | SM(R92C_LLT_INIT_ADDR, addr) | SM(R92C_LLT_INIT_DATA, data)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Wait for write operation to complete. */ for (ntries = 0; ntries < 20; ntries++) { if (MS(urtwn_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == R92C_LLT_INIT_OP_NO_ACTIVE) return (0); urtwn_ms_delay(sc); } return (ETIMEDOUT); } static int urtwn_efuse_read_next(struct urtwn_softc *sc, uint8_t *val) { uint32_t reg; usb_error_t error; int ntries; if (sc->last_rom_addr >= URTWN_EFUSE_MAX_LEN) return (EFAULT); reg = urtwn_read_4(sc, R92C_EFUSE_CTRL); reg = RW(reg, R92C_EFUSE_CTRL_ADDR, sc->last_rom_addr); reg &= ~R92C_EFUSE_CTRL_VALID; error = urtwn_write_4(sc, R92C_EFUSE_CTRL, reg); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Wait for read operation to complete. */ for (ntries = 0; ntries < 100; ntries++) { reg = urtwn_read_4(sc, R92C_EFUSE_CTRL); if (reg & R92C_EFUSE_CTRL_VALID) break; urtwn_ms_delay(sc); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read efuse byte at address 0x%x\n", sc->last_rom_addr); return (ETIMEDOUT); } *val = MS(reg, R92C_EFUSE_CTRL_DATA); sc->last_rom_addr++; return (0); } static int urtwn_efuse_read_data(struct urtwn_softc *sc, uint8_t *rom, uint8_t off, uint8_t msk) { uint8_t reg; int i, error; for (i = 0; i < 4; i++) { if (msk & (1 << i)) continue; error = urtwn_efuse_read_next(sc, ®); if (error != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "rom[0x%03X] == 0x%02X\n", off * 8 + i * 2, reg); rom[off * 8 + i * 2 + 0] = reg; error = urtwn_efuse_read_next(sc, ®); if (error != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "rom[0x%03X] == 0x%02X\n", off * 8 + i * 2 + 1, reg); rom[off * 8 + i * 2 + 1] = reg; } return (0); } #ifdef USB_DEBUG static void urtwn_dump_rom_contents(struct urtwn_softc *sc, uint8_t *rom, uint16_t size) { int i; /* Dump ROM contents. */ device_printf(sc->sc_dev, "%s:", __func__); for (i = 0; i < size; i++) { if (i % 32 == 0) printf("\n%03X: ", i); else if (i % 4 == 0) printf(" "); printf("%02X", rom[i]); } printf("\n"); } #endif static int urtwn_efuse_read(struct urtwn_softc *sc, uint8_t *rom, uint16_t size) { #define URTWN_CHK(res) do { \ if ((error = res) != 0) \ goto end; \ } while(0) uint8_t msk, off, reg; int error; URTWN_CHK(urtwn_efuse_switch_power(sc)); /* Read full ROM image. 
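 * The efuse is a packed stream of (header, data) blocks; a header
 * byte of 0xff marks the end of the programmed area, and the image
 * is pre-filled with 0xff so untouched words read back as erased.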
*/ sc->last_rom_addr = 0; memset(rom, 0xff, size); URTWN_CHK(urtwn_efuse_read_next(sc, ®)); while (reg != 0xff) { /* check for extended header */ if ((sc->chip & URTWN_CHIP_88E) && (reg & 0x1f) == 0x0f) { off = reg >> 5; URTWN_CHK(urtwn_efuse_read_next(sc, ®)); if ((reg & 0x0f) != 0x0f) off = ((reg & 0xf0) >> 1) | off; else continue; } else off = reg >> 4; msk = reg & 0xf; URTWN_CHK(urtwn_efuse_read_data(sc, rom, off, msk)); URTWN_CHK(urtwn_efuse_read_next(sc, ®)); } end: #ifdef USB_DEBUG if (sc->sc_debug & URTWN_DEBUG_ROM) urtwn_dump_rom_contents(sc, rom, size); #endif urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_OFF); if (error != 0) { device_printf(sc->sc_dev, "%s: error while reading ROM\n", __func__); } return (error); #undef URTWN_CHK } static int urtwn_efuse_switch_power(struct urtwn_softc *sc) { usb_error_t error; uint32_t reg; error = urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_ON); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); reg = urtwn_read_2(sc, R92C_SYS_ISO_CTRL); if (!(reg & R92C_SYS_ISO_CTRL_PWC_EV12V)) { error = urtwn_write_2(sc, R92C_SYS_ISO_CTRL, reg | R92C_SYS_ISO_CTRL_PWC_EV12V); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); if (!(reg & R92C_SYS_FUNC_EN_ELDR)) { error = urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg | R92C_SYS_FUNC_EN_ELDR); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } reg = urtwn_read_2(sc, R92C_SYS_CLKR); if ((reg & (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) != (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) { error = urtwn_write_2(sc, R92C_SYS_CLKR, reg | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } return (0); } static int urtwn_read_chipid(struct urtwn_softc *sc) { uint32_t reg; if (sc->chip & URTWN_CHIP_88E) return (0); reg = urtwn_read_4(sc, R92C_SYS_CFG); if (reg & R92C_SYS_CFG_TRP_VAUX_EN) return (EIO); if (reg & R92C_SYS_CFG_TYPE_92C) { sc->chip |= URTWN_CHIP_92C; /* Check if it is a castrated 8192C. */ if (MS(urtwn_read_4(sc, R92C_HPON_FSM), R92C_HPON_FSM_CHIP_BONDING_ID) == R92C_HPON_FSM_CHIP_BONDING_ID_92C_1T2R) sc->chip |= URTWN_CHIP_92C_1T2R; } if (reg & R92C_SYS_CFG_VENDOR_UMC) { sc->chip |= URTWN_CHIP_UMC; if (MS(reg, R92C_SYS_CFG_CHIP_VER_RTL) == 0) sc->chip |= URTWN_CHIP_UMC_A_CUT; } return (0); } static int urtwn_read_rom(struct urtwn_softc *sc) { struct r92c_rom *rom = &sc->rom.r92c_rom; int error; /* Read full ROM image. */ error = urtwn_efuse_read(sc, (uint8_t *)rom, sizeof(*rom)); if (error != 0) return (error); /* XXX Weird but this is what the vendor driver does. 
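 * (the PA setting byte sits at the fixed raw efuse offset 0x1fa,
 * outside the structured ROM image parsed above)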
*/ sc->last_rom_addr = 0x1fa; error = urtwn_efuse_read_next(sc, &sc->pa_setting); if (error != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "%s: PA setting=0x%x\n", __func__, sc->pa_setting); sc->board_type = MS(rom->rf_opt1, R92C_ROM_RF1_BOARD_TYPE); sc->regulatory = MS(rom->rf_opt1, R92C_ROM_RF1_REGULATORY); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "%s: regulatory type=%d\n", __func__, sc->regulatory); IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, rom->macaddr); sc->sc_rf_write = urtwn_r92c_rf_write; sc->sc_power_on = urtwn_r92c_power_on; sc->sc_power_off = urtwn_r92c_power_off; return (0); } static int urtwn_r88e_read_rom(struct urtwn_softc *sc) { struct r88e_rom *rom = &sc->rom.r88e_rom; int error; error = urtwn_efuse_read(sc, (uint8_t *)rom, sizeof(sc->rom.r88e_rom)); if (error != 0) return (error); sc->bw20_tx_pwr_diff = (rom->tx_pwr_diff >> 4); if (sc->bw20_tx_pwr_diff & 0x08) sc->bw20_tx_pwr_diff |= 0xf0; sc->ofdm_tx_pwr_diff = (rom->tx_pwr_diff & 0xf); if (sc->ofdm_tx_pwr_diff & 0x08) sc->ofdm_tx_pwr_diff |= 0xf0; sc->regulatory = MS(rom->rf_board_opt, R92C_ROM_RF1_REGULATORY); URTWN_DPRINTF(sc, URTWN_DEBUG_ROM, "%s: regulatory type %d\n", __func__,sc->regulatory); IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, rom->macaddr); sc->sc_rf_write = urtwn_r88e_rf_write; sc->sc_power_on = urtwn_r88e_power_on; sc->sc_power_off = urtwn_r88e_power_off; return (0); } /* * Initialize rate adaptation in firmware. */ static int urtwn_ra_init(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; struct ieee80211_rateset *rs, *rs_ht; struct r92c_fw_cmd_macid_cfg cmd; uint32_t rates, basicrates; uint8_t mode; int maxrate, maxbasicrate, error, i, j; ni = ieee80211_ref_node(vap->iv_bss); rs = &ni->ni_rates; rs_ht = (struct ieee80211_rateset *) &ni->ni_htrates; /* Get normal and basic rates mask. */ rates = basicrates = 0; maxrate = maxbasicrate = 0; /* This is for 11bg */ for (i = 0; i < rs->rs_nrates; i++) { /* Convert 802.11 rate to HW rate index. */ for (j = 0; j < nitems(ridx2rate); j++) if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == ridx2rate[j]) break; if (j == nitems(ridx2rate)) /* Unknown rate, skip. */ continue; rates |= 1 << j; if (j > maxrate) maxrate = j; if (rs->rs_rates[i] & IEEE80211_RATE_BASIC) { basicrates |= 1 << j; if (j > maxbasicrate) maxbasicrate = j; } } /* If we're doing 11n, enable 11n rates */ if (ni->ni_flags & IEEE80211_NODE_HT) { for (i = 0; i < rs_ht->rs_nrates; i++) { if ((rs_ht->rs_rates[i] & 0x7f) > 0xf) continue; /* 11n rates start at index 12 */ j = ((rs_ht->rs_rates[i]) & 0xf) + 12; rates |= (1 << j); /* Guard against the rate table being oddly ordered */ if (j > maxrate) maxrate = j; } } #if 0 if (ic->ic_curmode == IEEE80211_MODE_11NG) raid = R92C_RAID_11GN; #endif /* NB: group addressed frames are done at 11bg rates for now */ if (ic->ic_curmode == IEEE80211_MODE_11B) mode = R92C_RAID_11B; else mode = R92C_RAID_11BG; /* XXX misleading 'mode' value here for unicast frames */ URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: mode 0x%x, rates 0x%08x, basicrates 0x%08x\n", __func__, mode, rates, basicrates); /* Set rates mask for group addressed frames. */ cmd.macid = URTWN_MACID_BC | URTWN_MACID_VALID; cmd.mask = htole32(mode << 28 | basicrates); error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd)); if (error != 0) { ieee80211_free_node(ni); device_printf(sc->sc_dev, "could not add broadcast station\n"); return (error); } /* Set initial MRR rate. 
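 * (the initial multi-rate-retry rate: group addressed frames start
 * out at the highest basic rate found above)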
*/ URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: maxbasicrate %d\n", __func__, maxbasicrate); urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BC), maxbasicrate); /* Set rates mask for unicast frames. */ if (ni->ni_flags & IEEE80211_NODE_HT) mode = R92C_RAID_11GN; else if (ic->ic_curmode == IEEE80211_MODE_11B) mode = R92C_RAID_11B; else mode = R92C_RAID_11BG; cmd.macid = URTWN_MACID_BSS | URTWN_MACID_VALID; cmd.mask = htole32(mode << 28 | rates); error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd)); if (error != 0) { ieee80211_free_node(ni); device_printf(sc->sc_dev, "could not add BSS station\n"); return (error); } /* Set initial MRR rate. */ URTWN_DPRINTF(sc, URTWN_DEBUG_RA, "%s: maxrate %d\n", __func__, maxrate); urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BSS), maxrate); /* Indicate highest supported rate. */ if (ni->ni_flags & IEEE80211_NODE_HT) ni->ni_txrate = rs_ht->rs_rates[rs_ht->rs_nrates - 1] | IEEE80211_RATE_MCS; else ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1]; ieee80211_free_node(ni); return (0); } static void urtwn_init_beacon(struct urtwn_softc *sc, struct urtwn_vap *uvp) { struct r92c_tx_desc *txd = &uvp->bcn_desc; txd->txdw0 = htole32( SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_BMCAST | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); txd->txdw1 = htole32( SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BEACON) | SM(R92C_TXDW1_RAID, R92C_RAID_11B)); if (sc->chip & URTWN_CHIP_88E) { txd->txdw1 |= htole32(SM(R88E_TXDW1_MACID, URTWN_MACID_BC)); txd->txdseq |= htole16(R88E_TXDSEQ_HWSEQ_EN); } else { txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, URTWN_MACID_BC)); txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ_EN); } txd->txdw4 = htole32(R92C_TXDW4_DRVRATE); txd->txdw5 = htole32(SM(R92C_TXDW5_DATARATE, URTWN_RIDX_CCK1)); } static int urtwn_setup_beacon(struct urtwn_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct urtwn_vap *uvp = URTWN_VAP(vap); struct mbuf *m; int error; URTWN_ASSERT_LOCKED(sc); if (ni->ni_chan == IEEE80211_CHAN_ANYC) return (EINVAL); m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); return (ENOMEM); } if (uvp->bcn_mbuf != NULL) m_freem(uvp->bcn_mbuf); uvp->bcn_mbuf = m; if ((error = urtwn_tx_beacon(sc, uvp)) != 0) return (error); /* XXX bcnq stuck workaround */ if ((error = urtwn_tx_beacon(sc, uvp)) != 0) return (error); URTWN_DPRINTF(sc, URTWN_DEBUG_BEACON, "%s: beacon was %srecognized\n", __func__, urtwn_read_1(sc, R92C_TDECTRL + 2) & (R92C_TDECTRL_BCN_VALID >> 16) ? "" : "not "); return (0); } static void urtwn_update_beacon(struct ieee80211vap *vap, int item) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; struct urtwn_vap *uvp = URTWN_VAP(vap); struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; int mcast = 0; URTWN_LOCK(sc); if (uvp->bcn_mbuf == NULL) { uvp->bcn_mbuf = ieee80211_beacon_alloc(ni); if (uvp->bcn_mbuf == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon frame\n", __func__); URTWN_UNLOCK(sc); return; } } URTWN_UNLOCK(sc); if (item == IEEE80211_BEACON_TIM) mcast = 1; /* XXX */ setbit(bo->bo_flags, item); ieee80211_beacon_update(ni, uvp->bcn_mbuf, mcast); URTWN_LOCK(sc); urtwn_tx_beacon(sc, uvp); URTWN_UNLOCK(sc); } /* * Push a beacon frame into the chip. Beacon will * be repeated by the chip every R92C_BCN_INTERVAL. 
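 * The prebuilt descriptor in uvp->bcn_desc is copied in front of
 * the beacon mbuf on every download; see urtwn_init_beacon() above.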
*/ static int urtwn_tx_beacon(struct urtwn_softc *sc, struct urtwn_vap *uvp) { struct r92c_tx_desc *desc = &uvp->bcn_desc; struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); bf = urtwn_getbuf(sc); if (bf == NULL) return (ENOMEM); memcpy(bf->buf, desc, sizeof(*desc)); urtwn_tx_start(sc, uvp->bcn_mbuf, IEEE80211_FC0_TYPE_MGT, bf); sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); return (0); } static int urtwn_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; uint8_t i; if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { URTWN_LOCK(sc); /* * First 4 slots for group keys, * what is left - for pairwise. * XXX incompatible with IBSS RSN. */ for (i = IEEE80211_WEP_NKID; i < R92C_CAM_ENTRY_COUNT; i++) { if ((sc->keys_bmap & (1 << i)) == 0) { sc->keys_bmap |= 1 << i; *keyix = i; break; } } URTWN_UNLOCK(sc); if (i == R92C_CAM_ENTRY_COUNT) { device_printf(sc->sc_dev, "%s: no free space in the key table\n", __func__); return 0; } } else *keyix = 0; } else { *keyix = k - vap->iv_nw_keys; } *rxkeyix = *keyix; return 1; } static void urtwn_key_set_cb(struct urtwn_softc *sc, union sec_param *data) { struct ieee80211_key *k = &data->key; uint8_t algo, keyid; int i, error; if (k->wk_keyix < IEEE80211_WEP_NKID) keyid = k->wk_keyix; else keyid = 0; /* Map net80211 cipher to HW crypto algorithm. */ switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: if (k->wk_keylen < 8) algo = R92C_CAM_ALGO_WEP40; else algo = R92C_CAM_ALGO_WEP104; break; case IEEE80211_CIPHER_TKIP: algo = R92C_CAM_ALGO_TKIP; break; case IEEE80211_CIPHER_AES_CCM: algo = R92C_CAM_ALGO_AES; break; default: device_printf(sc->sc_dev, "%s: undefined cipher %d\n", __func__, k->wk_cipher->ic_cipher); return; } URTWN_DPRINTF(sc, URTWN_DEBUG_KEY, "%s: keyix %d, keyid %d, algo %d/%d, flags %04X, len %d, " "macaddr %s\n", __func__, k->wk_keyix, keyid, k->wk_cipher->ic_cipher, algo, k->wk_flags, k->wk_keylen, ether_sprintf(k->wk_macaddr)); /* Write key. */ for (i = 0; i < 4; i++) { error = urtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i), LE_READ_4(&k->wk_key[i * 4])); if (error != 0) goto fail; } /* Write CTL0 last since that will validate the CAM entry. */ error = urtwn_cam_write(sc, R92C_CAM_CTL1(k->wk_keyix), LE_READ_4(&k->wk_macaddr[2])); if (error != 0) goto fail; error = urtwn_cam_write(sc, R92C_CAM_CTL0(k->wk_keyix), SM(R92C_CAM_ALGO, algo) | SM(R92C_CAM_KEYID, keyid) | SM(R92C_CAM_MACLO, LE_READ_2(&k->wk_macaddr[0])) | R92C_CAM_VALID); if (error != 0) goto fail; return; fail: device_printf(sc->sc_dev, "%s fails, error %d\n", __func__, error); } static void urtwn_key_del_cb(struct urtwn_softc *sc, union sec_param *data) { struct ieee80211_key *k = &data->key; int i; URTWN_DPRINTF(sc, URTWN_DEBUG_KEY, "%s: keyix %d, flags %04X, macaddr %s\n", __func__, k->wk_keyix, k->wk_flags, ether_sprintf(k->wk_macaddr)); urtwn_cam_write(sc, R92C_CAM_CTL0(k->wk_keyix), 0); urtwn_cam_write(sc, R92C_CAM_CTL1(k->wk_keyix), 0); /* Clear key. */ for (i = 0; i < 4; i++) urtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i), 0); sc->keys_bmap &= ~(1 << k->wk_keyix); } static int urtwn_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. 
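 * net80211 performs software crypto itself; returning 1 reports
 * success without installing anything into the on-chip CAM.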
*/ return (1); } return (!urtwn_cmd_sleepable(sc, k, sizeof(*k), urtwn_key_set_cb)); } static int urtwn_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct urtwn_softc *sc = vap->iv_ic->ic_softc; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return (1); } return (!urtwn_cmd_sleepable(sc, k, sizeof(*k), urtwn_key_del_cb)); } static void urtwn_tsf_task_adhoc(void *arg, int pending) { struct ieee80211vap *vap = arg; struct urtwn_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_node *ni; uint32_t reg; URTWN_LOCK(sc); ni = ieee80211_ref_node(vap->iv_bss); reg = urtwn_read_1(sc, R92C_BCN_CTRL); /* Accept beacons with the same BSSID. */ urtwn_set_rx_bssid_all(sc, 0); /* Enable synchronization. */ reg &= ~R92C_BCN_CTRL_DIS_TSF_UDT0; urtwn_write_1(sc, R92C_BCN_CTRL, reg); /* Synchronize. */ usb_pause_mtx(&sc->sc_mtx, hz * ni->ni_intval * 5 / 1000); /* Disable synchronization. */ reg |= R92C_BCN_CTRL_DIS_TSF_UDT0; urtwn_write_1(sc, R92C_BCN_CTRL, reg); /* Remove beacon filter. */ urtwn_set_rx_bssid_all(sc, 1); /* Enable beaconing. */ urtwn_write_1(sc, R92C_MBID_NUM, urtwn_read_1(sc, R92C_MBID_NUM) | R92C_MBID_TXBCN_RPT0); reg |= R92C_BCN_CTRL_EN_BCN; urtwn_write_1(sc, R92C_BCN_CTRL, reg); ieee80211_free_node(ni); URTWN_UNLOCK(sc); } static void urtwn_tsf_sync_enable(struct urtwn_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = &sc->sc_ic; struct urtwn_vap *uvp = URTWN_VAP(vap); /* Reset TSF. */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, R92C_DUAL_TSF_RST0); switch (vap->iv_opmode) { case IEEE80211_M_STA: /* Enable TSF synchronization. */ urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_DIS_TSF_UDT0); break; case IEEE80211_M_IBSS: ieee80211_runtask(ic, &uvp->tsf_task_adhoc); break; case IEEE80211_M_HOSTAP: /* Enable beaconing. */ urtwn_write_1(sc, R92C_MBID_NUM, urtwn_read_1(sc, R92C_MBID_NUM) | R92C_MBID_TXBCN_RPT0); urtwn_write_1(sc, R92C_BCN_CTRL, urtwn_read_1(sc, R92C_BCN_CTRL) | R92C_BCN_CTRL_EN_BCN); break; default: device_printf(sc->sc_dev, "undefined opmode %d\n", vap->iv_opmode); return; } } static void urtwn_get_tsf(struct urtwn_softc *sc, uint64_t *buf) { urtwn_read_region_1(sc, R92C_TSFTR, (uint8_t *)buf, sizeof(*buf)); } static void urtwn_set_led(struct urtwn_softc *sc, int led, int on) { uint8_t reg; if (led == URTWN_LED_LINK) { if (sc->chip & URTWN_CHIP_88E) { reg = urtwn_read_1(sc, R92C_LEDCFG2) & 0xf0; urtwn_write_1(sc, R92C_LEDCFG2, reg | 0x60); if (!on) { reg = urtwn_read_1(sc, R92C_LEDCFG2) & 0x90; urtwn_write_1(sc, R92C_LEDCFG2, reg | R92C_LEDCFG0_DIS); urtwn_write_1(sc, R92C_MAC_PINMUX_CFG, urtwn_read_1(sc, R92C_MAC_PINMUX_CFG) & 0xfe); } } else { reg = urtwn_read_1(sc, R92C_LEDCFG0) & 0x70; if (!on) reg |= R92C_LEDCFG0_DIS; urtwn_write_1(sc, R92C_LEDCFG0, reg); } sc->ledlink = on; /* Save LED state. 
*/ } } static void urtwn_set_mode(struct urtwn_softc *sc, uint8_t mode) { uint8_t reg; reg = urtwn_read_1(sc, R92C_MSR); reg = (reg & ~R92C_MSR_MASK) | mode; urtwn_write_1(sc, R92C_MSR, reg); } static void urtwn_ibss_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct urtwn_softc *sc = vap->iv_ic->ic_softc; struct urtwn_vap *uvp = URTWN_VAP(vap); uint64_t ni_tstamp, curr_tstamp; uvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); if (vap->iv_state == IEEE80211_S_RUN && (subtype == IEEE80211_FC0_SUBTYPE_BEACON || subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) { ni_tstamp = le64toh(ni->ni_tstamp.tsf); URTWN_LOCK(sc); urtwn_get_tsf(sc, &curr_tstamp); URTWN_UNLOCK(sc); curr_tstamp = le64toh(curr_tstamp); if (ni_tstamp >= curr_tstamp) (void) ieee80211_ibss_merge(ni); } } static int urtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct urtwn_vap *uvp = URTWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct urtwn_softc *sc = ic->ic_softc; struct ieee80211_node *ni; enum ieee80211_state ostate; uint32_t reg; uint8_t mode; int error = 0; ostate = vap->iv_state; URTWN_DPRINTF(sc, URTWN_DEBUG_STATE, "%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); URTWN_LOCK(sc); callout_stop(&sc->sc_watchdog_ch); if (ostate == IEEE80211_S_RUN) { /* Stop calibration. */ callout_stop(&sc->sc_calib_to); /* Turn link LED off. */ urtwn_set_led(sc, URTWN_LED_LINK, 0); /* Set media status to 'No Link'. */ urtwn_set_mode(sc, R92C_MSR_NOLINK); /* Stop Rx of data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0); /* Disable TSF synchronization. */ urtwn_write_1(sc, R92C_BCN_CTRL, (urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_EN_BCN) | R92C_BCN_CTRL_DIS_TSF_UDT0); /* Disable beaconing. */ urtwn_write_1(sc, R92C_MBID_NUM, urtwn_read_1(sc, R92C_MBID_NUM) & ~R92C_MBID_TXBCN_RPT0); /* Reset TSF. */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, R92C_DUAL_TSF_RST0); /* Reset EDCA parameters. */ urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002f3217); urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005e4317); urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x00105320); urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a444); } switch (nstate) { case IEEE80211_S_INIT: /* Turn link LED off. */ urtwn_set_led(sc, URTWN_LED_LINK, 0); break; case IEEE80211_S_SCAN: /* Pause AC Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, urtwn_read_1(sc, R92C_TXPAUSE) | R92C_TX_QUEUE_AC); break; case IEEE80211_S_AUTH: urtwn_set_chan(sc, ic->ic_curchan, NULL); break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Turn link LED on. */ urtwn_set_led(sc, URTWN_LED_LINK, 1); break; } ni = ieee80211_ref_node(vap->iv_bss); if (ic->ic_bsschan == IEEE80211_CHAN_ANYC || ni->ni_chan == IEEE80211_CHAN_ANYC) { device_printf(sc->sc_dev, "%s: could not move to RUN state\n", __func__); error = EINVAL; goto end_run; } switch (vap->iv_opmode) { case IEEE80211_M_STA: mode = R92C_MSR_INFRA; break; case IEEE80211_M_IBSS: mode = R92C_MSR_ADHOC; break; case IEEE80211_M_HOSTAP: mode = R92C_MSR_AP; break; default: device_printf(sc->sc_dev, "undefined opmode %d\n", vap->iv_opmode); error = EINVAL; goto end_run; } /* Set media status to 'Associated'. */ urtwn_set_mode(sc, mode); /* Set BSSID. 
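 * (written as two dwords; only the low 16 bits of the second write
 * carry address bytes, hence LE_READ_2)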
 */
		urtwn_write_4(sc, R92C_BSSID + 0,
		    LE_READ_4(&ni->ni_bssid[0]));
		urtwn_write_4(sc, R92C_BSSID + 4,
		    LE_READ_2(&ni->ni_bssid[4]));

		if (ic->ic_curmode == IEEE80211_MODE_11B)
			urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 0);
		else	/* 802.11b/g */
			urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 3);

		/* Enable Rx of data frames. */
		urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff);

		/* Flush all AC queues. */
		urtwn_write_1(sc, R92C_TXPAUSE, 0);

		/* Set beacon interval. */
		urtwn_write_2(sc, R92C_BCN_INTERVAL, ni->ni_intval);

		/* Allow Rx from our BSSID only. */
		if (ic->ic_promisc == 0) {
			reg = urtwn_read_4(sc, R92C_RCR);

			if (vap->iv_opmode != IEEE80211_M_HOSTAP)
				reg |= R92C_RCR_CBSSID_DATA;
			if (vap->iv_opmode != IEEE80211_M_IBSS)
				reg |= R92C_RCR_CBSSID_BCN;

			urtwn_write_4(sc, R92C_RCR, reg);
		}

		if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
		    vap->iv_opmode == IEEE80211_M_IBSS) {
			error = urtwn_setup_beacon(sc, ni);
			if (error != 0) {
				device_printf(sc->sc_dev,
				    "unable to push beacon into the chip, "
				    "error %d\n", error);
				goto end_run;
			}
		}

		/* Enable TSF synchronization. */
		urtwn_tsf_sync_enable(sc, vap);

		urtwn_write_1(sc, R92C_SIFS_CCK + 1, 10);
		urtwn_write_1(sc, R92C_SIFS_OFDM + 1, 10);
		urtwn_write_1(sc, R92C_SPEC_SIFS + 1, 10);
		urtwn_write_1(sc, R92C_MAC_SPEC_SIFS + 1, 10);
		urtwn_write_1(sc, R92C_R2T_SIFS + 1, 10);
		urtwn_write_1(sc, R92C_T2T_SIFS + 1, 10);

		/* Initialize rate adaptation. */
		if (!(sc->chip & URTWN_CHIP_88E))
			urtwn_ra_init(sc);

		/* Turn link LED on. */
		urtwn_set_led(sc, URTWN_LED_LINK, 1);

		sc->avg_pwdb = -1;	/* Reset average RSSI. */
		/* Reset temperature calibration state machine. */
		sc->sc_flags &= ~URTWN_TEMP_MEASURED;
		sc->thcal_lctemp = 0;
		/* Start periodic calibration. */
		callout_reset(&sc->sc_calib_to, 2*hz, urtwn_calib_to, sc);

end_run:
		ieee80211_free_node(ni);
		break;
	default:
		break;
	}
	URTWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);
	return (error != 0 ? error : uvp->newstate(vap, nstate, arg));
}

static void
urtwn_calib_to(void *arg)
{
	struct urtwn_softc *sc = arg;

	/* Do it in a process context. */
	urtwn_cmd_sleepable(sc, NULL, 0, urtwn_calib_cb);
}

static void
urtwn_calib_cb(struct urtwn_softc *sc, union sec_param *data)
{
	/* Do temperature compensation. */
	urtwn_temp_calib(sc);

	if ((urtwn_read_1(sc, R92C_MSR) & R92C_MSR_MASK) != R92C_MSR_NOLINK)
		callout_reset(&sc->sc_calib_to, 2*hz, urtwn_calib_to, sc);
}

static void
urtwn_watchdog(void *arg)
{
	struct urtwn_softc *sc = arg;

	if (sc->sc_txtimer > 0) {
		if (--sc->sc_txtimer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
			return;
		}
		callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc);
	}
}

static void
urtwn_update_avgrssi(struct urtwn_softc *sc, int rate, int8_t rssi)
{
	int pwdb;

	/* Convert antenna signal to percentage. */
	if (rssi <= -100 || rssi >= 20)
		pwdb = 0;
	else if (rssi >= 0)
		pwdb = 100;
	else
		pwdb = 100 + rssi;
	if (!(sc->chip & URTWN_CHIP_88E)) {
		if (rate <= URTWN_RIDX_CCK11) {
			/* CCK gain is smaller than OFDM/MCS gain. */
			pwdb += 6;
			if (pwdb > 100)
				pwdb = 100;
			if (pwdb <= 14)
				pwdb -= 4;
			else if (pwdb <= 26)
				pwdb -= 8;
			else if (pwdb <= 34)
				pwdb -= 6;
			else if (pwdb <= 42)
				pwdb -= 2;
		}
	}
	if (sc->avg_pwdb == -1)	/* Init.
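	 * The first sample seeds the running average; afterwards
	 * avg_pwdb = (19 * avg_pwdb + pwdb) / 20, biased up by one
	 * while the signal is rising.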
*/ sc->avg_pwdb = pwdb; else if (sc->avg_pwdb < pwdb) sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20) + 1; else sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20); URTWN_DPRINTF(sc, URTWN_DEBUG_RSSI, "%s: PWDB %d, EMA %d\n", __func__, pwdb, sc->avg_pwdb); } static int8_t urtwn_get_rssi(struct urtwn_softc *sc, int rate, void *physt) { static const int8_t cckoff[] = { 16, -12, -26, -46 }; struct r92c_rx_phystat *phy; struct r92c_rx_cck *cck; uint8_t rpt; int8_t rssi; if (rate <= URTWN_RIDX_CCK11) { cck = (struct r92c_rx_cck *)physt; if (sc->sc_flags & URTWN_FLAG_CCK_HIPWR) { rpt = (cck->agc_rpt >> 5) & 0x3; rssi = (cck->agc_rpt & 0x1f) << 1; } else { rpt = (cck->agc_rpt >> 6) & 0x3; rssi = cck->agc_rpt & 0x3e; } rssi = cckoff[rpt] - rssi; } else { /* OFDM/HT. */ phy = (struct r92c_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110; } return (rssi); } static int8_t urtwn_r88e_get_rssi(struct urtwn_softc *sc, int rate, void *physt) { struct r92c_rx_phystat *phy; struct r88e_rx_cck *cck; uint8_t cck_agc_rpt, lna_idx, vga_idx; int8_t rssi; rssi = 0; if (rate <= URTWN_RIDX_CCK11) { cck = (struct r88e_rx_cck *)physt; cck_agc_rpt = cck->agc_rpt; lna_idx = (cck_agc_rpt & 0xe0) >> 5; vga_idx = cck_agc_rpt & 0x1f; switch (lna_idx) { case 7: if (vga_idx <= 27) rssi = -100 + 2* (27 - vga_idx); else rssi = -100; break; case 6: rssi = -48 + 2 * (2 - vga_idx); break; case 5: rssi = -42 + 2 * (7 - vga_idx); break; case 4: rssi = -36 + 2 * (7 - vga_idx); break; case 3: rssi = -24 + 2 * (7 - vga_idx); break; case 2: rssi = -12 + 2 * (5 - vga_idx); break; case 1: rssi = 8 - (2 * vga_idx); break; case 0: rssi = 14 - (2 * vga_idx); break; } rssi += 6; } else { /* OFDM/HT. */ phy = (struct r92c_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110; } return (rssi); } static __inline uint8_t rate2ridx(uint8_t rate) { if (rate & IEEE80211_RATE_MCS) { /* 11n rates start at idx 12 */ return ((rate & 0xf) + 12); } switch (rate) { /* 11g */ case 12: return 4; case 18: return 5; case 24: return 6; case 36: return 7; case 48: return 8; case 72: return 9; case 96: return 10; case 108: return 11; /* 11b */ case 2: return 0; case 4: return 1; case 11: return 2; case 22: return 3; default: return 0; } } static int urtwn_tx_data(struct urtwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m, struct urtwn_data *data) { const struct ieee80211_txparam *tp; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *k = NULL; struct ieee80211_channel *chan; struct ieee80211_frame *wh; struct r92c_tx_desc *txd; uint8_t macid, raid, rate, ridx, subtype, type, tid, qsel; int hasqos, ismcast; URTWN_ASSERT_LOCKED(sc); /* * Software crypto. */ wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; hasqos = IEEE80211_QOS_HAS_SEQ(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); /* Select TX ring for this frame. */ if (hasqos) { tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid &= IEEE80211_QOS_TID; } else tid = 0; chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? ni->ni_chan : ic->ic_curchan; tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; /* Choose a TX rate index. 
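 * Management and EAPOL frames go out at the configured mgmt rate
 * and multicast at the mcast rate; other data frames consult the
 * rate-control module when the chip has one, else use a fixed
 * fallback.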
*/ if (type == IEEE80211_FC0_TYPE_MGT) rate = tp->mgmtrate; else if (ismcast) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else if (m->m_flags & M_EAPOL) rate = tp->mgmtrate; else { if (URTWN_CHIP_HAS_RATECTL(sc)) { /* XXX pass pktlen */ (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } else { /* XXX TODO: drop the default rate for 11b/11g? */ if (ni->ni_flags & IEEE80211_NODE_HT) rate = IEEE80211_RATE_MCS | 0x4; /* MCS4 */ else if (ic->ic_curmode != IEEE80211_MODE_11B) rate = 108; else rate = 22; } } /* * XXX TODO: this should be per-node, for 11b versus 11bg * nodes in hostap mode */ ridx = rate2ridx(rate); if (ni->ni_flags & IEEE80211_NODE_HT) raid = R92C_RAID_11GN; else if (ic->ic_curmode != IEEE80211_MODE_11B) raid = R92C_RAID_11BG; else raid = R92C_RAID_11B; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); return (ENOBUFS); } /* in case packet header moved, reset pointer */ wh = mtod(m, struct ieee80211_frame *); } /* Fill Tx descriptor. */ txd = (struct r92c_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); if (ismcast) txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); if (!ismcast) { if (sc->chip & URTWN_CHIP_88E) { struct urtwn_node *un = URTWN_NODE(ni); macid = un->id; } else macid = URTWN_MACID_BSS; if (type == IEEE80211_FC0_TYPE_DATA) { qsel = tid % URTWN_MAX_TID; if (sc->chip & URTWN_CHIP_88E) { txd->txdw2 |= htole32( R88E_TXDW2_AGGBK | R88E_TXDW2_CCX_RPT); } else txd->txdw1 |= htole32(R92C_TXDW1_AGGBK); /* protmode, non-HT */ /* XXX TODO: noack frames? */ if ((rate & 0x80) == 0 && (ic->ic_flags & IEEE80211_F_USEPROT)) { switch (ic->ic_protmode) { case IEEE80211_PROT_CTSONLY: txd->txdw4 |= htole32( R92C_TXDW4_CTS2SELF | R92C_TXDW4_HWRTSEN); break; case IEEE80211_PROT_RTSCTS: txd->txdw4 |= htole32( R92C_TXDW4_RTSEN | R92C_TXDW4_HWRTSEN); break; default: break; } } /* protmode, HT */ /* XXX TODO: noack frames? */ if ((rate & 0x80) && (ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) { txd->txdw4 |= htole32( R92C_TXDW4_RTSEN | R92C_TXDW4_HWRTSEN); } /* XXX TODO: rtsrate is configurable? 24mbit may * be a bit high for RTS rate? */ txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, URTWN_RIDX_OFDM24)); txd->txdw5 |= htole32(0x0001ff00); } else /* IEEE80211_FC0_TYPE_MGT */ qsel = R92C_TXDW1_QSEL_MGNT; } else { macid = URTWN_MACID_BC; qsel = R92C_TXDW1_QSEL_MGNT; } txd->txdw1 |= htole32( SM(R92C_TXDW1_QSEL, qsel) | SM(R92C_TXDW1_RAID, raid)); /* XXX TODO: 40MHZ flag? */ /* XXX TODO: AMPDU flag? (AGG_ENABLE or AGG_BREAK?) Density shift? */ /* XXX Short preamble? */ /* XXX Short-GI? */ if (sc->chip & URTWN_CHIP_88E) txd->txdw1 |= htole32(SM(R88E_TXDW1_MACID, macid)); else txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, macid)); txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx)); /* Force this rate if needed. */ if (URTWN_CHIP_HAS_RATECTL(sc) || ismcast || (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) || (m->m_flags & M_EAPOL) || type != IEEE80211_FC0_TYPE_DATA) txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); if (!hasqos) { /* Use HW sequence numbering for non-QoS frames. */ if (sc->chip & URTWN_CHIP_88E) txd->txdseq = htole16(R88E_TXDSEQ_HWSEQ_EN); else txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ_EN); } else { /* Set sequence number. 
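For QoS frames net80211 has already assigned a per-TID sequence number
 * to the mbuf, so it is copied into the descriptor below; the modulo
 * keeps it within the 12-bit sequence space (IEEE80211_SEQ_RANGE is
 * 4096).
 *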
*/ txd->txdseq = htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE); } if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { uint8_t cipher; switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: case IEEE80211_CIPHER_TKIP: cipher = R92C_TXDW1_CIPHER_RC4; break; case IEEE80211_CIPHER_AES_CCM: cipher = R92C_TXDW1_CIPHER_AES; break; default: device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return (EINVAL); } txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); } if (ieee80211_radiotap_active_vap(vap)) { struct urtwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } data->ni = ni; urtwn_tx_start(sc, m, type, data); return (0); } static int urtwn_tx_raw(struct urtwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m, struct urtwn_data *data, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *k = NULL; struct ieee80211_frame *wh; struct r92c_tx_desc *txd; uint8_t cipher, ridx, type; /* Encrypt the frame if need be. */ cipher = R92C_TXDW1_CIPHER_NONE; if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { /* Retrieve key for TX. */ k = ieee80211_crypto_encap(ni, m); if (k == NULL) return (ENOBUFS); if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: case IEEE80211_CIPHER_TKIP: cipher = R92C_TXDW1_CIPHER_RC4; break; case IEEE80211_CIPHER_AES_CCM: cipher = R92C_TXDW1_CIPHER_AES; break; default: device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return (EINVAL); } } } /* XXX TODO: 11n checks, matching urtwn_tx_data() */ wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* Fill Tx descriptor. */ txd = (struct r92c_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92C_TXDW0_OFFSET, sizeof(*txd)) | R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txd->txdw0 |= htole32(R92C_TXDW0_BMCAST); if (params->ibp_flags & IEEE80211_BPF_RTS) txd->txdw4 |= htole32(R92C_TXDW4_RTSEN); if (params->ibp_flags & IEEE80211_BPF_CTS) txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF); if (txd->txdw4 & htole32(R92C_TXDW4_RTSEN | R92C_TXDW4_CTS2SELF)) { txd->txdw4 |= htole32(R92C_TXDW4_HWRTSEN); txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, URTWN_RIDX_OFDM24)); } if (sc->chip & URTWN_CHIP_88E) txd->txdw1 |= htole32(SM(R88E_TXDW1_MACID, URTWN_MACID_BC)); else txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, URTWN_MACID_BC)); /* XXX TODO: rate index/config (RAID) for 11n? */ txd->txdw1 |= htole32(SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT)); txd->txdw1 |= htole32(SM(R92C_TXDW1_CIPHER, cipher)); /* Choose a TX rate index. */ ridx = rate2ridx(params->ibp_rate0); txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx)); txd->txdw5 |= htole32(0x0001ff00); txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE); if (!IEEE80211_QOS_HAS_SEQ(wh)) { /* Use HW sequence numbering for non-QoS frames. */ if (sc->chip & URTWN_CHIP_88E) txd->txdseq = htole16(R88E_TXDSEQ_HWSEQ_EN); else txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ_EN); } else { /* Set sequence number. 
*/ txd->txdseq = htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE); } if (ieee80211_radiotap_active_vap(vap)) { struct urtwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } data->ni = ni; urtwn_tx_start(sc, m, type, data); return (0); } static void urtwn_tx_start(struct urtwn_softc *sc, struct mbuf *m, uint8_t type, struct urtwn_data *data) { struct usb_xfer *xfer; struct r92c_tx_desc *txd; uint16_t ac, sum; int i, xferlen; URTWN_ASSERT_LOCKED(sc); ac = M_WME_GETAC(m); switch (type) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: xfer = sc->sc_xfer[URTWN_BULK_TX_VO]; break; default: xfer = sc->sc_xfer[wme2queue[ac].qid]; break; } txd = (struct r92c_tx_desc *)data->buf; txd->txdw0 |= htole32(SM(R92C_TXDW0_PKTLEN, m->m_pkthdr.len)); /* Compute Tx descriptor checksum. */ sum = 0; for (i = 0; i < sizeof(*txd) / 2; i++) sum ^= ((uint16_t *)txd)[i]; txd->txdsum = sum; /* NB: already little endian. */ xferlen = sizeof(*txd) + m->m_pkthdr.len; m_copydata(m, 0, m->m_pkthdr.len, (caddr_t)&txd[1]); data->buflen = xferlen; data->m = m; STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next); usbd_transfer_start(xfer); } static int urtwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct urtwn_softc *sc = ic->ic_softc; int error; URTWN_LOCK(sc); if ((sc->sc_flags & URTWN_RUNNING) == 0) { URTWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { URTWN_UNLOCK(sc); return (error); } urtwn_start(sc); URTWN_UNLOCK(sc); return (0); } static void urtwn_start(struct urtwn_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; struct urtwn_data *bf; URTWN_ASSERT_LOCKED(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { bf = urtwn_getbuf(sc); if (bf == NULL) { mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: called; m=%p\n", __func__, m); if (urtwn_tx_data(sc, ni, m, bf) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); m_freem(m); ieee80211_free_node(ni); break; } sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); } } static void urtwn_parent(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); if (sc->sc_flags & URTWN_DETACHED) { URTWN_UNLOCK(sc); return; } URTWN_UNLOCK(sc); if (ic->ic_nrunning > 0) { if (urtwn_init(sc) != 0) { struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) ieee80211_stop(vap); } else ieee80211_start_all(ic); } else urtwn_stop(sc); } static __inline int urtwn_power_on(struct urtwn_softc *sc) { return sc->sc_power_on(sc); } static int urtwn_r92c_power_on(struct urtwn_softc *sc) { uint32_t reg; usb_error_t error; int ntries; /* Wait for autoload done bit. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_1(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_PFM_ALDN) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for chip autoload\n"); return (ETIMEDOUT); } /* Unlock ISO/CLK/Power control register. */ error = urtwn_write_1(sc, R92C_RSV_CTRL, 0); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Move SPS into PWM mode. 
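The 0x2b write below switches the switching power supply to PWM mode
 * for normal operation (power-off returns it to PFM with 0x23). The
 * whole power-on sequence leans on one busy-wait idiom, sketched here
 * with a hypothetical condition_met() standing in for the register
 * test:
 *
 *	for (ntries = 0; ntries < limit; ntries++) {
 *		if (condition_met(sc))
 *			break;
 *		urtwn_ms_delay(sc);
 *	}
 *	if (ntries == limit)
 *		return (ETIMEDOUT);
 *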
*/ error = urtwn_write_1(sc, R92C_SPS0_CTRL, 0x2b); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); urtwn_ms_delay(sc); reg = urtwn_read_1(sc, R92C_LDOV12D_CTRL); if (!(reg & R92C_LDOV12D_CTRL_LDV12_EN)) { error = urtwn_write_1(sc, R92C_LDOV12D_CTRL, reg | R92C_LDOV12D_CTRL_LDV12_EN); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); urtwn_ms_delay(sc); error = urtwn_write_1(sc, R92C_SYS_ISO_CTRL, urtwn_read_1(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_MD2PP); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } /* Auto enable WLAN. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); for (ntries = 0; ntries < 1000; ntries++) { if (!(urtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_ONMAC)) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for MAC auto ON\n"); return (ETIMEDOUT); } /* Enable radio, GPIO and LED functions. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_PDN_EN | R92C_APS_FSMCO_PFM_ALDN); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Release RF digital isolation. */ error = urtwn_write_2(sc, R92C_SYS_ISO_CTRL, urtwn_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Initialize MAC. */ error = urtwn_write_1(sc, R92C_APSD_CTRL, urtwn_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); for (ntries = 0; ntries < 200; ntries++) { if (!(urtwn_read_1(sc, R92C_APSD_CTRL) & R92C_APSD_CTRL_OFF_STATUS)) break; urtwn_ms_delay(sc); } if (ntries == 200) { device_printf(sc->sc_dev, "timeout waiting for MAC initialization\n"); return (ETIMEDOUT); } /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ reg = urtwn_read_2(sc, R92C_CR); reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN | R92C_CR_ENSEC; error = urtwn_write_2(sc, R92C_CR, reg); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_1(sc, 0xfe10, 0x19); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static int urtwn_r88e_power_on(struct urtwn_softc *sc) { uint32_t reg; usb_error_t error; int ntries; /* Wait for power ready bit. */ for (ntries = 0; ntries < 5000; ntries++) { if (urtwn_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST) break; urtwn_ms_delay(sc); } if (ntries == 5000) { device_printf(sc->sc_dev, "timeout waiting for chip power up\n"); return (ETIMEDOUT); } /* Reset BB. */ error = urtwn_write_1(sc, R92C_SYS_FUNC_EN, urtwn_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 2, urtwn_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Disable HWPDN. */ error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Disable WL suspend. 
*/ error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) & ~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_2(sc, R92C_APS_FSMCO, urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); for (ntries = 0; ntries < 5000; ntries++) { if (!(urtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_ONMAC)) break; urtwn_ms_delay(sc); } if (ntries == 5000) return (ETIMEDOUT); /* Enable LDO normal mode. */ error = urtwn_write_1(sc, R92C_LPLDO_CTRL, urtwn_read_1(sc, R92C_LPLDO_CTRL) & ~R92C_LPLDO_CTRL_SLEEP); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ error = urtwn_write_2(sc, R92C_CR, 0); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); reg = urtwn_read_2(sc, R92C_CR); reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | R92C_CR_ENSEC | R92C_CR_CALTMR_EN; error = urtwn_write_2(sc, R92C_CR, reg); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static __inline void urtwn_power_off(struct urtwn_softc *sc) { return sc->sc_power_off(sc); } static void urtwn_r92c_power_off(struct urtwn_softc *sc) { uint32_t reg; /* Block all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); /* Disable RF */ urtwn_rf_write(sc, 0, 0, 0); urtwn_write_1(sc, R92C_APSD_CTRL, R92C_APSD_CTRL_OFF); /* Reset BB state machine */ urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_BB_GLB_RST); urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_USBA); /* * Reset digital sequence */ #ifndef URTWN_WITHOUT_UCODE if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RDY) { /* Reset MCU ready status */ urtwn_write_1(sc, R92C_MCUFWDL, 0); /* If firmware in ram code, do reset */ urtwn_fw_reset(sc); } #endif /* Reset MAC and Enable 8051 */ urtwn_write_1(sc, R92C_SYS_FUNC_EN + 1, (R92C_SYS_FUNC_EN_CPUEN | R92C_SYS_FUNC_EN_ELDR | R92C_SYS_FUNC_EN_HWPDN) >> 8); /* Reset MCU ready status */ urtwn_write_1(sc, R92C_MCUFWDL, 0); /* Disable MAC clock */ urtwn_write_2(sc, R92C_SYS_CLKR, R92C_SYS_CLKR_ANAD16V_EN | R92C_SYS_CLKR_ANA8M | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_80M_SSC_DIS | R92C_SYS_CLKR_SYS_EN | R92C_SYS_CLKR_RING_EN | 0x4000); /* Disable AFE PLL */ urtwn_write_1(sc, R92C_AFE_PLL_CTRL, 0x80); /* Gated AFE DIG_CLOCK */ urtwn_write_2(sc, R92C_AFE_XTAL_CTRL, 0x880F); /* Isolated digital to PON */ urtwn_write_1(sc, R92C_SYS_ISO_CTRL, R92C_SYS_ISO_CTRL_MD2PP | R92C_SYS_ISO_CTRL_PA2PCIE | R92C_SYS_ISO_CTRL_PD2CORE | R92C_SYS_ISO_CTRL_IP2MAC | R92C_SYS_ISO_CTRL_DIOP | R92C_SYS_ISO_CTRL_DIOE); /* * Pull GPIO PIN to balance level and LED control */ /* 1. 
Disable GPIO[7:0] */ urtwn_write_2(sc, R92C_GPIO_IOSEL, 0x0000); reg = urtwn_read_4(sc, R92C_GPIO_PIN_CTRL) & ~0x0000ff00; reg |= ((reg << 8) & 0x0000ff00) | 0x00ff0000; urtwn_write_4(sc, R92C_GPIO_PIN_CTRL, reg); /* Disable GPIO[10:8] */ urtwn_write_1(sc, R92C_MAC_PINMUX_CFG, 0x00); reg = urtwn_read_2(sc, R92C_GPIO_IO_SEL) & ~0x00f0; reg |= (((reg & 0x000f) << 4) | 0x0780); urtwn_write_2(sc, R92C_GPIO_IO_SEL, reg); /* Disable LED0 & 1 */ urtwn_write_2(sc, R92C_LEDCFG0, 0x8080); /* * Reset digital sequence */ /* Disable ELDR clock */ urtwn_write_2(sc, R92C_SYS_CLKR, R92C_SYS_CLKR_ANAD16V_EN | R92C_SYS_CLKR_ANA8M | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_80M_SSC_DIS | R92C_SYS_CLKR_SYS_EN | R92C_SYS_CLKR_RING_EN | 0x4000); /* Isolated ELDR to PON */ urtwn_write_1(sc, R92C_SYS_ISO_CTRL + 1, (R92C_SYS_ISO_CTRL_DIOR | R92C_SYS_ISO_CTRL_PWC_EV12V) >> 8); /* * Disable analog sequence */ /* Disable A15 power */ urtwn_write_1(sc, R92C_LDOA15_CTRL, R92C_LDOA15_CTRL_OBUF); /* Disable digital core power */ urtwn_write_1(sc, R92C_LDOV12D_CTRL, urtwn_read_1(sc, R92C_LDOV12D_CTRL) & ~R92C_LDOV12D_CTRL_LDV12_EN); /* Enter PFM mode */ urtwn_write_1(sc, R92C_SPS0_CTRL, 0x23); /* Set USB suspend */ urtwn_write_2(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_APDM_HOST | R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_PFM_ALDN); /* Lock ISO/CLK/Power control register. */ urtwn_write_1(sc, R92C_RSV_CTRL, 0x0E); } static void urtwn_r88e_power_off(struct urtwn_softc *sc) { uint8_t reg; int ntries; /* Disable any kind of TX reports. */ urtwn_write_1(sc, R88E_TX_RPT_CTRL, urtwn_read_1(sc, R88E_TX_RPT_CTRL) & ~(R88E_TX_RPT1_ENA | R88E_TX_RPT2_ENA)); /* Stop Rx. */ urtwn_write_1(sc, R92C_CR, 0); /* Move card to Low Power State. */ /* Block all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); for (ntries = 0; ntries < 20; ntries++) { /* Should be zero if no packet is transmitting. */ if (urtwn_read_4(sc, R88E_SCH_TXCMD) == 0) break; urtwn_ms_delay(sc); } if (ntries == 20) { device_printf(sc->sc_dev, "%s: failed to block Tx queues\n", __func__); return; } /* CCK and OFDM are disabled, and clocks are gated. */ urtwn_write_1(sc, R92C_SYS_FUNC_EN, urtwn_read_1(sc, R92C_SYS_FUNC_EN) & ~R92C_SYS_FUNC_EN_BBRSTB); urtwn_ms_delay(sc); /* Reset MAC TRX */ urtwn_write_1(sc, R92C_CR, R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN); /* check if removed later */ urtwn_write_1(sc, R92C_CR + 1, urtwn_read_1(sc, R92C_CR + 1) & ~(R92C_CR_ENSEC >> 8)); /* Respond TxOK to scheduler */ urtwn_write_1(sc, R92C_DUAL_TSF_RST, urtwn_read_1(sc, R92C_DUAL_TSF_RST) | 0x20); /* If firmware in ram code, do reset. */ #ifndef URTWN_WITHOUT_UCODE if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RDY) urtwn_r88e_fw_reset(sc); #endif /* Reset MCU ready status. */ urtwn_write_1(sc, R92C_MCUFWDL, 0x00); /* Disable 32k. */ urtwn_write_1(sc, R88E_32K_CTRL, urtwn_read_1(sc, R88E_32K_CTRL) & ~0x01); /* Move card to Disabled state. */ /* Turn off RF. */ urtwn_write_1(sc, R92C_RF_CTRL, 0); /* LDO Sleep mode. */ urtwn_write_1(sc, R92C_LPLDO_CTRL, urtwn_read_1(sc, R92C_LPLDO_CTRL) | R92C_LPLDO_CTRL_SLEEP); /* Turn off MAC by HW state machine */ urtwn_write_1(sc, R92C_APS_FSMCO + 1, urtwn_read_1(sc, R92C_APS_FSMCO + 1) | (R92C_APS_FSMCO_APFM_OFF >> 8)); for (ntries = 0; ntries < 20; ntries++) { /* Wait until it is disabled. 
*/ if ((urtwn_read_1(sc, R92C_APS_FSMCO + 1) & (R92C_APS_FSMCO_APFM_OFF >> 8)) == 0) break; urtwn_ms_delay(sc); } if (ntries == 20) { device_printf(sc->sc_dev, "%s: could not turn off MAC\n", __func__); return; } /* Schmitt trigger */ urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 2, urtwn_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); /* Enable WL suspend. */ urtwn_write_1(sc, R92C_APS_FSMCO + 1, (urtwn_read_1(sc, R92C_APS_FSMCO + 1) & ~0x10) | 0x08); /* Enable bandgap mbias in suspend. */ urtwn_write_1(sc, R92C_APS_FSMCO + 3, 0); /* Clear SIC_EN register. */ urtwn_write_1(sc, R92C_GPIO_MUXCFG + 1, urtwn_read_1(sc, R92C_GPIO_MUXCFG + 1) & ~0x10); /* Set USB suspend enable local register */ urtwn_write_1(sc, R92C_USB_SUSPEND, urtwn_read_1(sc, R92C_USB_SUSPEND) | 0x10); /* Reset MCU IO Wrapper. */ reg = urtwn_read_1(sc, R92C_RSV_CTRL + 1); urtwn_write_1(sc, R92C_RSV_CTRL + 1, reg & ~0x08); urtwn_write_1(sc, R92C_RSV_CTRL + 1, reg | 0x08); /* marked as 'For Power Consumption' code. */ urtwn_write_1(sc, R92C_GPIO_OUT, urtwn_read_1(sc, R92C_GPIO_IN)); urtwn_write_1(sc, R92C_GPIO_IOSEL, 0xff); urtwn_write_1(sc, R92C_GPIO_IO_SEL, urtwn_read_1(sc, R92C_GPIO_IO_SEL) << 4); urtwn_write_1(sc, R92C_GPIO_MOD, urtwn_read_1(sc, R92C_GPIO_MOD) | 0x0f); /* Set LNA, TRSW, EX_PA Pin to output mode. */ urtwn_write_4(sc, R88E_BB_PAD_CTRL, 0x00080808); } static int urtwn_llt_init(struct urtwn_softc *sc) { int i, error, page_count, pktbuf_count; page_count = (sc->chip & URTWN_CHIP_88E) ? R88E_TX_PAGE_COUNT : R92C_TX_PAGE_COUNT; pktbuf_count = (sc->chip & URTWN_CHIP_88E) ? R88E_TXPKTBUF_COUNT : R92C_TXPKTBUF_COUNT; /* Reserve pages [0; page_count]. */ for (i = 0; i < page_count; i++) { if ((error = urtwn_llt_write(sc, i, i + 1)) != 0) return (error); } /* NB: 0xff indicates end-of-list. */ if ((error = urtwn_llt_write(sc, i, 0xff)) != 0) return (error); /* * Use pages [page_count + 1; pktbuf_count - 1] * as ring buffer. */ for (++i; i < pktbuf_count - 1; i++) { if ((error = urtwn_llt_write(sc, i, i + 1)) != 0) return (error); } /* Make the last page point to the beginning of the ring buffer. */ error = urtwn_llt_write(sc, i, page_count + 1); return (error); } #ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *sc) { uint16_t reg; int ntries; /* Tell 8051 to reset itself. */ urtwn_write_1(sc, R92C_HMETFR + 3, 0x20); /* Wait until the 8051 resets itself. */ for (ntries = 0; ntries < 100; ntries++) { reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); if (!(reg & R92C_SYS_FUNC_EN_CPUEN)) return; urtwn_ms_delay(sc); } /* Force 8051 reset. 
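If the CPU failed to clear R92C_SYS_FUNC_EN_CPUEN on its own, clearing
 * the bit below holds the 8051 in reset; the R88E variant that follows
 * pulses the bit (clear, then set) so the CPU restarts cleanly.
 *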
*/ urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN); } static void urtwn_r88e_fw_reset(struct urtwn_softc *sc) { uint16_t reg; reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN); urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN); urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg | R92C_SYS_FUNC_EN_CPUEN); } static int urtwn_fw_loadpage(struct urtwn_softc *sc, int page, const uint8_t *buf, int len) { uint32_t reg; usb_error_t error = USB_ERR_NORMAL_COMPLETION; int off, mlen; reg = urtwn_read_4(sc, R92C_MCUFWDL); reg = RW(reg, R92C_MCUFWDL_PAGE, page); urtwn_write_4(sc, R92C_MCUFWDL, reg); off = R92C_FW_START_ADDR; while (len > 0) { if (len > 196) mlen = 196; else if (len > 4) mlen = 4; else mlen = 1; /* XXX fix this deconst */ error = urtwn_write_region_1(sc, off, __DECONST(uint8_t *, buf), mlen); if (error != USB_ERR_NORMAL_COMPLETION) break; off += mlen; buf += mlen; len -= mlen; } return (error); } static int urtwn_load_firmware(struct urtwn_softc *sc) { const struct firmware *fw; const struct r92c_fw_hdr *hdr; const char *imagename; const u_char *ptr; size_t len; uint32_t reg; int mlen, ntries, page, error; URTWN_UNLOCK(sc); /* Read firmware image from the filesystem. */ if (sc->chip & URTWN_CHIP_88E) imagename = "urtwn-rtl8188eufw"; else if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) == URTWN_CHIP_UMC_A_CUT) imagename = "urtwn-rtl8192cfwU"; else imagename = "urtwn-rtl8192cfwT"; fw = firmware_get(imagename); URTWN_LOCK(sc); if (fw == NULL) { device_printf(sc->sc_dev, "failed loadfirmware of file %s\n", imagename); return (ENOENT); } len = fw->datasize; if (len < sizeof(*hdr)) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } ptr = fw->data; hdr = (const struct r92c_fw_hdr *)ptr; /* Check if there is a valid FW header and skip it. */ if ((le16toh(hdr->signature) >> 4) == 0x88c || (le16toh(hdr->signature) >> 4) == 0x88e || (le16toh(hdr->signature) >> 4) == 0x92c) { URTWN_DPRINTF(sc, URTWN_DEBUG_FIRMWARE, "FW V%d.%d %02d-%02d %02d:%02d\n", le16toh(hdr->version), le16toh(hdr->subversion), hdr->month, hdr->date, hdr->hour, hdr->minute); ptr += sizeof(*hdr); len -= sizeof(*hdr); } if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) { if (sc->chip & URTWN_CHIP_88E) urtwn_r88e_fw_reset(sc); else urtwn_fw_reset(sc); urtwn_write_1(sc, R92C_MCUFWDL, 0); } if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_2(sc, R92C_SYS_FUNC_EN, urtwn_read_2(sc, R92C_SYS_FUNC_EN) | R92C_SYS_FUNC_EN_CPUEN); } urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_EN); urtwn_write_1(sc, R92C_MCUFWDL + 2, urtwn_read_1(sc, R92C_MCUFWDL + 2) & ~0x08); /* Reset the FWDL checksum. */ urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_CHKSUM_RPT); for (page = 0; len > 0; page++) { mlen = min(len, R92C_FW_PAGE_SIZE); error = urtwn_fw_loadpage(sc, page, ptr, mlen); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware page\n"); goto fail; } ptr += mlen; len -= mlen; } urtwn_write_1(sc, R92C_MCUFWDL, urtwn_read_1(sc, R92C_MCUFWDL) & ~R92C_MCUFWDL_EN); urtwn_write_1(sc, R92C_MCUFWDL + 1, 0); /* Wait for checksum report. 
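The download handshake acknowledges in two steps: the chip first raises
 * R92C_MCUFWDL_CHKSUM_RPT once the received pages pass the checksum,
 * and only after the driver sets R92C_MCUFWDL_RDY (plus an 8051 reset
 * on the 88E) does R92C_MCUFWDL_WINTINI_RDY report that the firmware
 * has actually booted; both bits are polled with the same 1000-try
 * loop. The 196/4/1-byte chunking in urtwn_fw_loadpage() above
 * presumably reflects the transfer-size limits of the register-write
 * path.
 *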
*/ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_CHKSUM_RPT) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for checksum report\n"); error = ETIMEDOUT; goto fail; } reg = urtwn_read_4(sc, R92C_MCUFWDL); reg = (reg & ~R92C_MCUFWDL_WINTINI_RDY) | R92C_MCUFWDL_RDY; urtwn_write_4(sc, R92C_MCUFWDL, reg); if (sc->chip & URTWN_CHIP_88E) urtwn_r88e_fw_reset(sc); /* Wait for firmware readiness. */ for (ntries = 0; ntries < 1000; ntries++) { if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_WINTINI_RDY) break; urtwn_ms_delay(sc); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for firmware readiness\n"); error = ETIMEDOUT; goto fail; } fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } #endif static int urtwn_dma_init(struct urtwn_softc *sc) { struct usb_endpoint *ep, *ep_end; usb_error_t usb_err; uint32_t reg; int hashq, hasnq, haslq, nqueues, ntx; int error, pagecount, npubqpages, nqpages, nrempages, tx_boundary; /* Initialize LLT table. */ error = urtwn_llt_init(sc); if (error != 0) return (error); /* Determine the number of bulk-out pipes. */ ntx = 0; ep = sc->sc_udev->endpoints; ep_end = sc->sc_udev->endpoints + sc->sc_udev->endpoints_max; for (; ep != ep_end; ep++) { if ((ep->edesc == NULL) || (ep->iface_index != sc->sc_iface_index)) continue; if (UE_GET_DIR(ep->edesc->bEndpointAddress) == UE_DIR_OUT) ntx++; } if (ntx == 0) { device_printf(sc->sc_dev, "%d: invalid number of Tx bulk pipes\n", ntx); return (EIO); } /* Get Tx queues to USB endpoints mapping. */ hashq = hasnq = haslq = nqueues = 0; switch (ntx) { case 1: hashq = 1; break; case 2: hashq = hasnq = 1; break; case 3: case 4: hashq = hasnq = haslq = 1; break; } nqueues = hashq + hasnq + haslq; if (nqueues == 0) return (EIO); npubqpages = nqpages = nrempages = pagecount = 0; if (sc->chip & URTWN_CHIP_88E) tx_boundary = R88E_TX_PAGE_BOUNDARY; else { pagecount = R92C_TX_PAGE_COUNT; npubqpages = R92C_PUBQ_NPAGES; tx_boundary = R92C_TX_PAGE_BOUNDARY; } /* Set number of pages for normal priority queue. */ if (sc->chip & URTWN_CHIP_88E) { usb_err = urtwn_write_2(sc, R92C_RQPN_NPQ, 0xd); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_4(sc, R92C_RQPN, 0x808e000d); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); } else { /* Get the number of pages for each queue. */ nqpages = (pagecount - npubqpages) / nqueues; /* * The remaining pages are assigned to the high priority * queue. */ nrempages = (pagecount - npubqpages) % nqueues; usb_err = urtwn_write_1(sc, R92C_RQPN_NPQ, hasnq ? nqpages : 0); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_4(sc, R92C_RQPN, /* Set number of pages for public queue. */ SM(R92C_RQPN_PUBQ, npubqpages) | /* Set number of pages for high priority queue. */ SM(R92C_RQPN_HPQ, hashq ? nqpages + nrempages : 0) | /* Set number of pages for low priority queue. */ SM(R92C_RQPN_LPQ, haslq ? nqpages : 0) | /* Load values. 
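The page budget above splits the non-public pages evenly across the
 * active queues and folds the integer-division remainder into the
 * high-priority queue, i.e.:
 *
 *	nqpages   = (pagecount - npubqpages) / nqueues;
 *	nrempages = (pagecount - npubqpages) % nqueues;
 *
 * so HPQ gets nqpages + nrempages pages, NPQ and LPQ get nqpages each.
 *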
*/ R92C_RQPN_LD); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); } usb_err = urtwn_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TRXFF_BNDY, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); usb_err = urtwn_write_1(sc, R92C_TDECTRL + 1, tx_boundary); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Set queue to USB pipe mapping. */ reg = urtwn_read_2(sc, R92C_TRXDMA_CTRL); reg &= ~R92C_TRXDMA_CTRL_QMAP_M; if (nqueues == 1) { if (hashq) reg |= R92C_TRXDMA_CTRL_QMAP_HQ; else if (hasnq) reg |= R92C_TRXDMA_CTRL_QMAP_NQ; else reg |= R92C_TRXDMA_CTRL_QMAP_LQ; } else if (nqueues == 2) { /* * All 2-endpoints configs have high and normal * priority queues. */ reg |= R92C_TRXDMA_CTRL_QMAP_HQ_NQ; } else reg |= R92C_TRXDMA_CTRL_QMAP_3EP; usb_err = urtwn_write_2(sc, R92C_TRXDMA_CTRL, reg); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Set Tx/Rx transfer page boundary. */ usb_err = urtwn_write_2(sc, R92C_TRXFF_BNDY + 2, (sc->chip & URTWN_CHIP_88E) ? 0x23ff : 0x27ff); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); /* Set Tx/Rx transfer page size. */ usb_err = urtwn_write_1(sc, R92C_PBP, SM(R92C_PBP_PSRX, R92C_PBP_128) | SM(R92C_PBP_PSTX, R92C_PBP_128)); if (usb_err != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static int urtwn_mac_init(struct urtwn_softc *sc) { usb_error_t error; int i; /* Write MAC initialization values. */ if (sc->chip & URTWN_CHIP_88E) { for (i = 0; i < nitems(rtl8188eu_mac); i++) { error = urtwn_write_1(sc, rtl8188eu_mac[i].reg, rtl8188eu_mac[i].val); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } urtwn_write_1(sc, R92C_MAX_AGGR_NUM, 0x07); } else { for (i = 0; i < nitems(rtl8192cu_mac); i++) error = urtwn_write_1(sc, rtl8192cu_mac[i].reg, rtl8192cu_mac[i].val); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); } return (0); } static void urtwn_bb_init(struct urtwn_softc *sc) { const struct urtwn_bb_prog *prog; uint32_t reg; uint8_t crystalcap; int i; /* Enable BB and RF. */ urtwn_write_2(sc, R92C_SYS_FUNC_EN, urtwn_read_2(sc, R92C_SYS_FUNC_EN) | R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_DIO_RF); if (!(sc->chip & URTWN_CHIP_88E)) urtwn_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83); urtwn_write_1(sc, R92C_RF_CTRL, R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); urtwn_write_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_BBRSTB); if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_1(sc, R92C_LDOHCI12_CTRL, 0x0f); urtwn_write_1(sc, 0x15, 0xe9); urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80); } /* Select BB programming based on board type. */ if (sc->chip & URTWN_CHIP_88E) prog = &rtl8188eu_bb_prog; else if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = &rtl8188ce_bb_prog; else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) prog = &rtl8188ru_bb_prog; else prog = &rtl8188cu_bb_prog; } else { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = &rtl8192ce_bb_prog; else prog = &rtl8192cu_bb_prog; } /* Write BB initialization values. 
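Each prog entry is a (register, value) pair written verbatim, with a
 * short settle delay after every write. The 1T2R fixups that follow use
 * the usual read-modify-write shape, reg = (reg & ~mask) | field, to
 * touch only the bits selecting the single active chain.
 *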
*/ for (i = 0; i < prog->count; i++) { urtwn_bb_write(sc, prog->regs[i], prog->vals[i]); urtwn_ms_delay(sc); } if (sc->chip & URTWN_CHIP_92C_1T2R) { /* 8192C 1T only configuration. */ reg = urtwn_bb_read(sc, R92C_FPGA0_TXINFO); reg = (reg & ~0x00000003) | 0x2; urtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg); reg = urtwn_bb_read(sc, R92C_FPGA1_TXINFO); reg = (reg & ~0x00300033) | 0x00200022; urtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg); reg = urtwn_bb_read(sc, R92C_CCK0_AFESETTING); reg = (reg & ~0xff000000) | 0x45 << 24; urtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg); reg = urtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA); reg = (reg & ~0x000000ff) | 0x23; urtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg); reg = urtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1); reg = (reg & ~0x00000030) | 1 << 4; urtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg); reg = urtwn_bb_read(sc, 0xe74); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe74, reg); reg = urtwn_bb_read(sc, 0xe78); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe78, reg); reg = urtwn_bb_read(sc, 0xe7c); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe7c, reg); reg = urtwn_bb_read(sc, 0xe80); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe80, reg); reg = urtwn_bb_read(sc, 0xe88); reg = (reg & ~0x0c000000) | 2 << 26; urtwn_bb_write(sc, 0xe88, reg); } /* Write AGC values. */ for (i = 0; i < prog->agccount; i++) { urtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, prog->agcvals[i]); urtwn_ms_delay(sc); } if (sc->chip & URTWN_CHIP_88E) { urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x69553422); urtwn_ms_delay(sc); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x69553420); urtwn_ms_delay(sc); crystalcap = sc->rom.r88e_rom.crystalcap; if (crystalcap == 0xff) crystalcap = 0x20; crystalcap &= 0x3f; reg = urtwn_bb_read(sc, R92C_AFE_XTAL_CTRL); urtwn_bb_write(sc, R92C_AFE_XTAL_CTRL, RW(reg, R92C_AFE_XTAL_CTRL_ADDR, crystalcap | crystalcap << 6)); } else { if (urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR) sc->sc_flags |= URTWN_FLAG_CCK_HIPWR; } } static void urtwn_rf_init(struct urtwn_softc *sc) { const struct urtwn_rf_prog *prog; uint32_t reg, type; int i, j, idx, off; /* Select RF programming based on board type. */ if (sc->chip & URTWN_CHIP_88E) prog = rtl8188eu_rf_prog; else if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_MINICARD) prog = rtl8188ce_rf_prog; else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) prog = rtl8188ru_rf_prog; else prog = rtl8188cu_rf_prog; } else prog = rtl8192ce_rf_prog; for (i = 0; i < sc->nrxchains; i++) { /* Save RF_ENV control type. */ idx = i / 2; off = (i % 2) * 16; reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); type = (reg >> off) & 0x10; /* Set RF_ENV enable. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i)); reg |= 0x100000; urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg); urtwn_ms_delay(sc); /* Set RF_ENV output high. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i)); reg |= 0x10; urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg); urtwn_ms_delay(sc); /* Set address and data lengths of RF registers. */ reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i)); reg &= ~R92C_HSSI_PARAM2_ADDR_LENGTH; urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg); urtwn_ms_delay(sc); reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i)); reg &= ~R92C_HSSI_PARAM2_DATA_LENGTH; urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg); urtwn_ms_delay(sc); /* Write RF initialization values for this chain. 
*/ for (j = 0; j < prog[i].count; j++) { if (prog[i].regs[j] >= 0xf9 && prog[i].regs[j] <= 0xfe) { /* * These are fake RF registers offsets that * indicate a delay is required. */ usb_pause_mtx(&sc->sc_mtx, hz / 20); /* 50ms */ continue; } urtwn_rf_write(sc, i, prog[i].regs[j], prog[i].vals[j]); urtwn_ms_delay(sc); } /* Restore RF_ENV control type. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); reg &= ~(0x10 << off) | (type << off); urtwn_bb_write(sc, R92C_FPGA0_RFIFACESW(idx), reg); /* Cache RF register CHNLBW. */ sc->rf_chnlbw[i] = urtwn_rf_read(sc, i, R92C_RF_CHNLBW); } if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) == URTWN_CHIP_UMC_A_CUT) { urtwn_rf_write(sc, 0, R92C_RF_RX_G1, 0x30255); urtwn_rf_write(sc, 0, R92C_RF_RX_G2, 0x50a00); } } static void urtwn_cam_init(struct urtwn_softc *sc) { /* Invalidate all CAM entries. */ urtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_CLR); } static int urtwn_cam_write(struct urtwn_softc *sc, uint32_t addr, uint32_t data) { usb_error_t error; error = urtwn_write_4(sc, R92C_CAMWRITE, data); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); error = urtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_WRITE | SM(R92C_CAMCMD_ADDR, addr)); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); return (0); } static void urtwn_pa_bias_init(struct urtwn_softc *sc) { uint8_t reg; int i; for (i = 0; i < sc->nrxchains; i++) { if (sc->pa_setting & (1 << i)) continue; urtwn_rf_write(sc, i, R92C_RF_IPA, 0x0f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0x4f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0x8f406); urtwn_rf_write(sc, i, R92C_RF_IPA, 0xcf406); } if (!(sc->pa_setting & 0x10)) { reg = urtwn_read_1(sc, 0x16); reg = (reg & ~0xf0) | 0x90; urtwn_write_1(sc, 0x16, reg); } } static void urtwn_rxfilter_init(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t rcr; uint16_t filter; URTWN_ASSERT_LOCKED(sc); /* Accept all multicast frames. */ urtwn_write_4(sc, R92C_MAR + 0, 0xffffffff); urtwn_write_4(sc, R92C_MAR + 4, 0xffffffff); /* Filter for management frames. */ filter = 0x7f3f; switch (vap->iv_opmode) { case IEEE80211_M_STA: filter &= ~( R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_ASSOC_REQ) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_REASSOC_REQ) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_PROBE_REQ)); break; case IEEE80211_M_HOSTAP: filter &= ~( R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_ASSOC_RESP) | R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_REASSOC_RESP)); break; case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: break; default: device_printf(sc->sc_dev, "%s: undefined opmode %d\n", __func__, vap->iv_opmode); break; } urtwn_write_2(sc, R92C_RXFLTMAP0, filter); /* Reject all control frames. */ urtwn_write_2(sc, R92C_RXFLTMAP1, 0x0000); /* Reject all data frames. */ urtwn_write_2(sc, R92C_RXFLTMAP2, 0x0000); rcr = R92C_RCR_AM | R92C_RCR_AB | R92C_RCR_APM | R92C_RCR_HTC_LOC_CTRL | R92C_RCR_APP_PHYSTS | R92C_RCR_APP_ICV | R92C_RCR_APP_MIC; if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* Accept all frames. */ rcr |= R92C_RCR_ACF | R92C_RCR_ADF | R92C_RCR_AMF | R92C_RCR_AAP; } /* Set Rx filter. */ urtwn_write_4(sc, R92C_RCR, rcr); if (ic->ic_promisc != 0) { /* Update Rx filter. 
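urtwn_set_promisc() (defined further below) swaps two mask sets on the
 * RCR: with promiscuous mode off, the accept-everything bits (control,
 * data, management, all-unicast) are cleared and the address/BSSID
 * matching bits set; with it on, the reverse.
 *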
*/ urtwn_set_promisc(sc); } } static void urtwn_edca_init(struct urtwn_softc *sc) { urtwn_write_2(sc, R92C_SPEC_SIFS, 0x100a); urtwn_write_2(sc, R92C_MAC_SPEC_SIFS, 0x100a); urtwn_write_2(sc, R92C_SIFS_CCK, 0x100a); urtwn_write_2(sc, R92C_SIFS_OFDM, 0x100a); urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x005ea42b); urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a44f); urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005ea324); urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002fa226); } static void urtwn_write_txpower(struct urtwn_softc *sc, int chain, uint16_t power[URTWN_RIDX_COUNT]) { uint32_t reg; /* Write per-CCK rate Tx power. */ if (chain == 0) { reg = urtwn_bb_read(sc, R92C_TXAGC_A_CCK1_MCS32); reg = RW(reg, R92C_TXAGC_A_CCK1, power[0]); urtwn_bb_write(sc, R92C_TXAGC_A_CCK1_MCS32, reg); reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); reg = RW(reg, R92C_TXAGC_A_CCK2, power[1]); reg = RW(reg, R92C_TXAGC_A_CCK55, power[2]); reg = RW(reg, R92C_TXAGC_A_CCK11, power[3]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); } else { reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK1_55_MCS32); reg = RW(reg, R92C_TXAGC_B_CCK1, power[0]); reg = RW(reg, R92C_TXAGC_B_CCK2, power[1]); reg = RW(reg, R92C_TXAGC_B_CCK55, power[2]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK1_55_MCS32, reg); reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); reg = RW(reg, R92C_TXAGC_B_CCK11, power[3]); urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); } /* Write per-OFDM rate Tx power. */ urtwn_bb_write(sc, R92C_TXAGC_RATE18_06(chain), SM(R92C_TXAGC_RATE06, power[ 4]) | SM(R92C_TXAGC_RATE09, power[ 5]) | SM(R92C_TXAGC_RATE12, power[ 6]) | SM(R92C_TXAGC_RATE18, power[ 7])); urtwn_bb_write(sc, R92C_TXAGC_RATE54_24(chain), SM(R92C_TXAGC_RATE24, power[ 8]) | SM(R92C_TXAGC_RATE36, power[ 9]) | SM(R92C_TXAGC_RATE48, power[10]) | SM(R92C_TXAGC_RATE54, power[11])); /* Write per-MCS Tx power. */ urtwn_bb_write(sc, R92C_TXAGC_MCS03_MCS00(chain), SM(R92C_TXAGC_MCS00, power[12]) | SM(R92C_TXAGC_MCS01, power[13]) | SM(R92C_TXAGC_MCS02, power[14]) | SM(R92C_TXAGC_MCS03, power[15])); urtwn_bb_write(sc, R92C_TXAGC_MCS07_MCS04(chain), SM(R92C_TXAGC_MCS04, power[16]) | SM(R92C_TXAGC_MCS05, power[17]) | SM(R92C_TXAGC_MCS06, power[18]) | SM(R92C_TXAGC_MCS07, power[19])); urtwn_bb_write(sc, R92C_TXAGC_MCS11_MCS08(chain), SM(R92C_TXAGC_MCS08, power[20]) | SM(R92C_TXAGC_MCS09, power[21]) | SM(R92C_TXAGC_MCS10, power[22]) | SM(R92C_TXAGC_MCS11, power[23])); urtwn_bb_write(sc, R92C_TXAGC_MCS15_MCS12(chain), SM(R92C_TXAGC_MCS12, power[24]) | SM(R92C_TXAGC_MCS13, power[25]) | SM(R92C_TXAGC_MCS14, power[26]) | SM(R92C_TXAGC_MCS15, power[27])); } static void urtwn_get_txpower(struct urtwn_softc *sc, int chain, struct ieee80211_channel *c, struct ieee80211_channel *extc, uint16_t power[URTWN_RIDX_COUNT]) { struct ieee80211com *ic = &sc->sc_ic; struct r92c_rom *rom = &sc->rom.r92c_rom; uint16_t cckpow, ofdmpow, htpow, diff, max; const struct urtwn_txpwr *base; int ridx, chan, group; /* Determine channel group. */ chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan <= 3) group = 0; else if (chan <= 9) group = 1; else group = 2; /* Get original Tx power based on board type and RF chain. 
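Every per-rate value below is built the same way: take the base power
 * from the ROM or the vendor table, add the offset for the rate class
 * (CCK, OFDM or MCS), and clamp, roughly:
 *
 *	power[ridx] = MIN(base + offset, R92C_MAX_TX_PWR);
 *
 * Per-chain offsets are packed as 4-bit nibbles in the ROM, hence the
 * recurring (diff >> (chain * 4)) & 0xf extraction.
 *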
*/ if (!(sc->chip & URTWN_CHIP_92C)) { if (sc->board_type == R92C_BOARD_TYPE_HIGHPA) base = &rtl8188ru_txagc[chain]; else base = &rtl8192cu_txagc[chain]; } else base = &rtl8192cu_txagc[chain]; memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0])); if (sc->regulatory == 0) { for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) power[ridx] = base->pwr[0][ridx]; } for (ridx = URTWN_RIDX_OFDM6; ridx < URTWN_RIDX_COUNT; ridx++) { if (sc->regulatory == 3) { power[ridx] = base->pwr[0][ridx]; /* Apply vendor limits. */ if (extc != NULL) max = rom->ht40_max_pwr[group]; else max = rom->ht20_max_pwr[group]; max = (max >> (chain * 4)) & 0xf; if (power[ridx] > max) power[ridx] = max; } else if (sc->regulatory == 1) { if (extc == NULL) power[ridx] = base->pwr[group][ridx]; } else if (sc->regulatory != 2) power[ridx] = base->pwr[0][ridx]; } /* Compute per-CCK rate Tx power. */ cckpow = rom->cck_tx_pwr[chain][group]; for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) { power[ridx] += cckpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } htpow = rom->ht40_1s_tx_pwr[chain][group]; if (sc->ntxchains > 1) { /* Apply reduction for 2 spatial streams. */ diff = rom->ht40_2s_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; htpow = (htpow > diff) ? htpow - diff : 0; } /* Compute per-OFDM rate Tx power. */ diff = rom->ofdm_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; ofdmpow = htpow + diff; /* HT->OFDM correction. */ for (ridx = URTWN_RIDX_OFDM6; ridx <= URTWN_RIDX_OFDM54; ridx++) { power[ridx] += ofdmpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } /* Compute per-MCS Tx power. */ if (extc == NULL) { diff = rom->ht20_tx_pwr_diff[group]; diff = (diff >> (chain * 4)) & 0xf; htpow += diff; /* HT40->HT20 correction. */ } for (ridx = 12; ridx <= 27; ridx++) { power[ridx] += htpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } #ifdef USB_DEBUG if (sc->sc_debug & URTWN_DEBUG_TXPWR) { /* Dump per-rate Tx power values. */ printf("Tx power for chain %d:\n", chain); for (ridx = URTWN_RIDX_CCK1; ridx < URTWN_RIDX_COUNT; ridx++) printf("Rate %d = %u\n", ridx, power[ridx]); } #endif } static void urtwn_r88e_get_txpower(struct urtwn_softc *sc, int chain, struct ieee80211_channel *c, struct ieee80211_channel *extc, uint16_t power[URTWN_RIDX_COUNT]) { struct ieee80211com *ic = &sc->sc_ic; struct r88e_rom *rom = &sc->rom.r88e_rom; uint16_t cckpow, ofdmpow, bw20pow, htpow; const struct urtwn_r88e_txpwr *base; int ridx, chan, group; /* Determine channel group. */ chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan <= 2) group = 0; else if (chan <= 5) group = 1; else if (chan <= 8) group = 2; else if (chan <= 11) group = 3; else if (chan <= 13) group = 4; else group = 5; /* Get original Tx power based on board type and RF chain. */ base = &rtl8188eu_txagc[chain]; memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0])); if (sc->regulatory == 0) { for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) power[ridx] = base->pwr[0][ridx]; } for (ridx = URTWN_RIDX_OFDM6; ridx < URTWN_RIDX_COUNT; ridx++) { if (sc->regulatory == 3) power[ridx] = base->pwr[0][ridx]; else if (sc->regulatory == 1) { if (extc == NULL) power[ridx] = base->pwr[group][ridx]; } else if (sc->regulatory != 2) power[ridx] = base->pwr[0][ridx]; } /* Compute per-CCK rate Tx power. 
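The 88E ROM stores one CCK base power per channel group; the mapping
 * above places channels 1-2, 3-5, 6-8, 9-11, 12-13 and 14 into groups
 * 0 through 5 respectively.
 *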
*/ cckpow = rom->cck_tx_pwr[group]; for (ridx = URTWN_RIDX_CCK1; ridx <= URTWN_RIDX_CCK11; ridx++) { power[ridx] += cckpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } htpow = rom->ht40_tx_pwr[group]; /* Compute per-OFDM rate Tx power. */ ofdmpow = htpow + sc->ofdm_tx_pwr_diff; for (ridx = URTWN_RIDX_OFDM6; ridx <= URTWN_RIDX_OFDM54; ridx++) { power[ridx] += ofdmpow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } bw20pow = htpow + sc->bw20_tx_pwr_diff; for (ridx = 12; ridx <= 27; ridx++) { power[ridx] += bw20pow; if (power[ridx] > R92C_MAX_TX_PWR) power[ridx] = R92C_MAX_TX_PWR; } } static void urtwn_set_txpower(struct urtwn_softc *sc, struct ieee80211_channel *c, struct ieee80211_channel *extc) { uint16_t power[URTWN_RIDX_COUNT]; int i; for (i = 0; i < sc->ntxchains; i++) { /* Compute per-rate Tx power values. */ if (sc->chip & URTWN_CHIP_88E) urtwn_r88e_get_txpower(sc, i, c, extc, power); else urtwn_get_txpower(sc, i, c, extc, power); /* Write per-rate Tx power values to hardware. */ urtwn_write_txpower(sc, i, power); } } static void urtwn_set_rx_bssid_all(struct urtwn_softc *sc, int enable) { uint32_t reg; reg = urtwn_read_4(sc, R92C_RCR); if (enable) reg &= ~R92C_RCR_CBSSID_BCN; else reg |= R92C_RCR_CBSSID_BCN; urtwn_write_4(sc, R92C_RCR, reg); } static void urtwn_set_gain(struct urtwn_softc *sc, uint8_t gain) { uint32_t reg; reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, gain); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); if (!(sc->chip & URTWN_CHIP_88E)) { reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, gain); urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); } } static void urtwn_scan_start(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); /* Receive beacons / probe responses from any BSSID. */ if (ic->ic_opmode != IEEE80211_M_IBSS) urtwn_set_rx_bssid_all(sc, 1); /* Set gain for scanning. */ urtwn_set_gain(sc, 0x20); URTWN_UNLOCK(sc); } static void urtwn_scan_end(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); /* Restore limitations. */ if (ic->ic_promisc == 0 && ic->ic_opmode != IEEE80211_M_IBSS) urtwn_set_rx_bssid_all(sc, 0); /* Set gain under link. */ urtwn_set_gain(sc, 0x32); URTWN_UNLOCK(sc); } static void urtwn_set_channel(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; struct ieee80211_channel *c = ic->ic_curchan; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); URTWN_LOCK(sc); if (vap->iv_state == IEEE80211_S_SCAN) { /* Make link LED blink during scan. */ urtwn_set_led(sc, URTWN_LED_LINK, !sc->ledlink); } urtwn_set_chan(sc, c, NULL); sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); URTWN_UNLOCK(sc); } static int urtwn_wme_update(struct ieee80211com *ic) { const struct wmeParams *wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; struct urtwn_softc *sc = ic->ic_softc; uint8_t aifs, acm, slottime; int ac; acm = 0; slottime = IEEE80211_GET_SLOTTIME(ic); URTWN_LOCK(sc); for (ac = WME_AC_BE; ac < WME_NUM_AC; ac++) { /* AIFS[AC] = AIFSN[AC] * aSlotTime + aSIFSTime. 
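Worked example: with the 9 us short slot and the 10 us SIFS
 * (IEEE80211_DUR_SIFS), the default best-effort AIFSN of 3 yields
 * 3 * 9 + 10 = 37 us.
 *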
*/ aifs = wmep[ac].wmep_aifsn * slottime + IEEE80211_DUR_SIFS; urtwn_write_4(sc, wme2queue[ac].reg, SM(R92C_EDCA_PARAM_TXOP, wmep[ac].wmep_txopLimit) | SM(R92C_EDCA_PARAM_ECWMIN, wmep[ac].wmep_logcwmin) | SM(R92C_EDCA_PARAM_ECWMAX, wmep[ac].wmep_logcwmax) | SM(R92C_EDCA_PARAM_AIFS, aifs)); if (ac != WME_AC_BE) acm |= wmep[ac].wmep_acm << ac; } if (acm != 0) acm |= R92C_ACMHWCTRL_EN; urtwn_write_1(sc, R92C_ACMHWCTRL, (urtwn_read_1(sc, R92C_ACMHWCTRL) & ~R92C_ACMHWCTRL_ACM_MASK) | acm); URTWN_UNLOCK(sc); return 0; } static void urtwn_update_slot(struct ieee80211com *ic) { urtwn_cmd_sleepable(ic->ic_softc, NULL, 0, urtwn_update_slot_cb); } static void urtwn_update_slot_cb(struct urtwn_softc *sc, union sec_param *data) { struct ieee80211com *ic = &sc->sc_ic; uint8_t slottime; slottime = IEEE80211_GET_SLOTTIME(ic); URTWN_DPRINTF(sc, URTWN_DEBUG_ANY, "%s: setting slot time to %uus\n", __func__, slottime); urtwn_write_1(sc, R92C_SLOT, slottime); urtwn_update_aifs(sc, slottime); } static void urtwn_update_aifs(struct urtwn_softc *sc, uint8_t slottime) { const struct wmeParams *wmep = sc->sc_ic.ic_wme.wme_chanParams.cap_wmeParams; uint8_t aifs, ac; for (ac = WME_AC_BE; ac < WME_NUM_AC; ac++) { /* AIFS[AC] = AIFSN[AC] * aSlotTime + aSIFSTime. */ aifs = wmep[ac].wmep_aifsn * slottime + IEEE80211_DUR_SIFS; urtwn_write_1(sc, wme2queue[ac].reg, aifs); } } static void urtwn_set_promisc(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t rcr, mask1, mask2; URTWN_ASSERT_LOCKED(sc); if (vap->iv_opmode == IEEE80211_M_MONITOR) return; mask1 = R92C_RCR_ACF | R92C_RCR_ADF | R92C_RCR_AMF | R92C_RCR_AAP; mask2 = R92C_RCR_APM; if (vap->iv_state == IEEE80211_S_RUN) { switch (vap->iv_opmode) { case IEEE80211_M_STA: mask2 |= R92C_RCR_CBSSID_DATA; /* FALLTHROUGH */ case IEEE80211_M_HOSTAP: mask2 |= R92C_RCR_CBSSID_BCN; break; case IEEE80211_M_IBSS: mask2 |= R92C_RCR_CBSSID_DATA; break; default: device_printf(sc->sc_dev, "%s: undefined opmode %d\n", __func__, vap->iv_opmode); return; } } rcr = urtwn_read_4(sc, R92C_RCR); if (ic->ic_promisc == 0) rcr = (rcr & ~mask1) | mask2; else rcr = (rcr & ~mask2) | mask1; urtwn_write_4(sc, R92C_RCR, rcr); } static void urtwn_update_promisc(struct ieee80211com *ic) { struct urtwn_softc *sc = ic->ic_softc; URTWN_LOCK(sc); if (sc->sc_flags & URTWN_RUNNING) urtwn_set_promisc(sc); URTWN_UNLOCK(sc); } static void urtwn_update_mcast(struct ieee80211com *ic) { /* XXX do nothing? */ } static struct ieee80211_node * -urtwn_r88e_node_alloc(struct ieee80211vap *vap, +urtwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct urtwn_node *un; un = malloc(sizeof (struct urtwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (un == NULL) return NULL; un->id = URTWN_MACID_UNDEFINED; return &un->ni; } static void -urtwn_r88e_newassoc(struct ieee80211_node *ni, int isnew) +urtwn_newassoc(struct ieee80211_node *ni, int isnew) { struct urtwn_softc *sc = ni->ni_ic->ic_softc; struct urtwn_node *un = URTWN_NODE(ni); uint8_t id; + /* Only do this bit for R88E chips */ + if (! 
(sc->chip & URTWN_CHIP_88E)) + return; + if (!isnew) return; URTWN_NT_LOCK(sc); for (id = 0; id <= URTWN_MACID_MAX(sc); id++) { if (id != URTWN_MACID_BC && sc->node_list[id] == NULL) { un->id = id; sc->node_list[id] = ni; break; } } URTWN_NT_UNLOCK(sc); if (id > URTWN_MACID_MAX(sc)) { device_printf(sc->sc_dev, "%s: node table is full\n", __func__); } } static void -urtwn_r88e_node_free(struct ieee80211_node *ni) +urtwn_node_free(struct ieee80211_node *ni) { struct urtwn_softc *sc = ni->ni_ic->ic_softc; struct urtwn_node *un = URTWN_NODE(ni); URTWN_NT_LOCK(sc); if (un->id != URTWN_MACID_UNDEFINED) sc->node_list[un->id] = NULL; URTWN_NT_UNLOCK(sc); sc->sc_node_free(ni); } static void urtwn_set_chan(struct urtwn_softc *sc, struct ieee80211_channel *c, struct ieee80211_channel *extc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t reg; u_int chan; int i; chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */ if (chan == 0 || chan == IEEE80211_CHAN_ANY) { device_printf(sc->sc_dev, "%s: invalid channel %x\n", __func__, chan); return; } /* Set Tx power for this new channel. */ urtwn_set_txpower(sc, c, extc); for (i = 0; i < sc->nrxchains; i++) { urtwn_rf_write(sc, i, R92C_RF_CHNLBW, RW(sc->rf_chnlbw[i], R92C_RF_CHNLBW_CHNL, chan)); } #ifndef IEEE80211_NO_HT if (extc != NULL) { /* Is secondary channel below or above primary? */ int prichlo = c->ic_freq < extc->ic_freq; urtwn_write_1(sc, R92C_BWOPMODE, urtwn_read_1(sc, R92C_BWOPMODE) & ~R92C_BWOPMODE_20MHZ); reg = urtwn_read_1(sc, R92C_RRSR + 2); reg = (reg & ~0x6f) | (prichlo ? 1 : 2) << 5; urtwn_write_1(sc, R92C_RRSR + 2, reg); urtwn_bb_write(sc, R92C_FPGA0_RFMOD, urtwn_bb_read(sc, R92C_FPGA0_RFMOD) | R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA1_RFMOD, urtwn_bb_read(sc, R92C_FPGA1_RFMOD) | R92C_RFMOD_40MHZ); /* Set CCK side band. */ reg = urtwn_bb_read(sc, R92C_CCK0_SYSTEM); reg = (reg & ~0x00000010) | (prichlo ? 0 : 1) << 4; urtwn_bb_write(sc, R92C_CCK0_SYSTEM, reg); reg = urtwn_bb_read(sc, R92C_OFDM1_LSTF); reg = (reg & ~0x00000c00) | (prichlo ? 1 : 2) << 10; urtwn_bb_write(sc, R92C_OFDM1_LSTF, reg); urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2, urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) & ~R92C_FPGA0_ANAPARAM2_CBW20); reg = urtwn_bb_read(sc, 0x818); reg = (reg & ~0x0c000000) | (prichlo ? 2 : 1) << 26; urtwn_bb_write(sc, 0x818, reg); /* Select 40MHz bandwidth. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, (sc->rf_chnlbw[0] & ~0xfff) | chan); } else #endif { urtwn_write_1(sc, R92C_BWOPMODE, urtwn_read_1(sc, R92C_BWOPMODE) | R92C_BWOPMODE_20MHZ); urtwn_bb_write(sc, R92C_FPGA0_RFMOD, urtwn_bb_read(sc, R92C_FPGA0_RFMOD) & ~R92C_RFMOD_40MHZ); urtwn_bb_write(sc, R92C_FPGA1_RFMOD, urtwn_bb_read(sc, R92C_FPGA1_RFMOD) & ~R92C_RFMOD_40MHZ); if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2, urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) | R92C_FPGA0_ANAPARAM2_CBW20); } /* Select 20MHz bandwidth. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, (sc->rf_chnlbw[0] & ~0xfff) | chan | ((sc->chip & URTWN_CHIP_88E) ? R88E_RF_CHNLBW_BW20 : R92C_RF_CHNLBW_BW20)); } } static void urtwn_iq_calib(struct urtwn_softc *sc) { /* TODO */ } static void urtwn_lc_calib(struct urtwn_softc *sc) { uint32_t rf_ac[2]; uint8_t txmode; int i; txmode = urtwn_read_1(sc, R92C_OFDM1_LSTF + 3); if ((txmode & 0x70) != 0) { /* Disable all continuous Tx. */ urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode & ~0x70); /* Set RF mode to standby mode. 
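LC calibration must run with no transmissions in flight: if a
 * continuous-Tx test mode was active, each chain's RF_AC register is
 * saved and the RF forced to standby, otherwise the Tx queues are
 * simply paused. The calibration itself is kicked by the LCSTART bit
 * and given 100 ms to settle before the saved state is restored.
 *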
*/ for (i = 0; i < sc->nrxchains; i++) { rf_ac[i] = urtwn_rf_read(sc, i, R92C_RF_AC); urtwn_rf_write(sc, i, R92C_RF_AC, RW(rf_ac[i], R92C_RF_AC_MODE, R92C_RF_AC_MODE_STANDBY)); } } else { /* Block all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); } /* Start calibration. */ urtwn_rf_write(sc, 0, R92C_RF_CHNLBW, urtwn_rf_read(sc, 0, R92C_RF_CHNLBW) | R92C_RF_CHNLBW_LCSTART); /* Give calibration the time to complete. */ usb_pause_mtx(&sc->sc_mtx, hz / 10); /* 100ms */ /* Restore configuration. */ if ((txmode & 0x70) != 0) { /* Restore Tx mode. */ urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode); /* Restore RF mode. */ for (i = 0; i < sc->nrxchains; i++) urtwn_rf_write(sc, i, R92C_RF_AC, rf_ac[i]); } else { /* Unblock all Tx queues. */ urtwn_write_1(sc, R92C_TXPAUSE, 0x00); } } static void urtwn_temp_calib(struct urtwn_softc *sc) { uint8_t temp; URTWN_ASSERT_LOCKED(sc); if (!(sc->sc_flags & URTWN_TEMP_MEASURED)) { /* Start measuring temperature. */ URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: start measuring temperature\n", __func__); if (sc->chip & URTWN_CHIP_88E) { urtwn_rf_write(sc, 0, R88E_RF_T_METER, R88E_RF_T_METER_START); } else { urtwn_rf_write(sc, 0, R92C_RF_T_METER, R92C_RF_T_METER_START); } sc->sc_flags |= URTWN_TEMP_MEASURED; return; } sc->sc_flags &= ~URTWN_TEMP_MEASURED; /* Read measured temperature. */ if (sc->chip & URTWN_CHIP_88E) { temp = MS(urtwn_rf_read(sc, 0, R88E_RF_T_METER), R88E_RF_T_METER_VAL); } else { temp = MS(urtwn_rf_read(sc, 0, R92C_RF_T_METER), R92C_RF_T_METER_VAL); } if (temp == 0) { /* Read failed, skip. */ URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: temperature read failed, skipping\n", __func__); return; } URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: temperature: previous %u, current %u\n", __func__, sc->thcal_lctemp, temp); /* * Redo LC calibration if temperature changed significantly since * last calibration. */ if (sc->thcal_lctemp == 0) { /* First LC calibration is performed in urtwn_init(). */ sc->thcal_lctemp = temp; } else if (abs(temp - sc->thcal_lctemp) > 1) { URTWN_DPRINTF(sc, URTWN_DEBUG_TEMP, "%s: LC calib triggered by temp: %u -> %u\n", __func__, sc->thcal_lctemp, temp); urtwn_lc_calib(sc); /* Record temperature of last LC calibration. */ sc->thcal_lctemp = temp; } } static int urtwn_init(struct urtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint8_t macaddr[IEEE80211_ADDR_LEN]; uint32_t reg; usb_error_t usb_err = USB_ERR_NORMAL_COMPLETION; int error; URTWN_LOCK(sc); if (sc->sc_flags & URTWN_RUNNING) { URTWN_UNLOCK(sc); return (0); } /* Init firmware commands ring. */ sc->fwcur = 0; /* Allocate Tx/Rx buffers. */ error = urtwn_alloc_rx_list(sc); if (error != 0) goto fail; error = urtwn_alloc_tx_list(sc); if (error != 0) goto fail; /* Power on adapter. */ error = urtwn_power_on(sc); if (error != 0) goto fail; /* Initialize DMA. */ error = urtwn_dma_init(sc); if (error != 0) goto fail; /* Set info size in Rx descriptors (in 64-bit words). */ urtwn_write_1(sc, R92C_RX_DRVINFO_SZ, 4); /* Init interrupts. 
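The 88E path below first acknowledges any stale events (the 0xffffffff
 * write to R88E_HISR, presumably write-one-to-clear), unmasks the
 * interrupts of interest in HIMR/HIMRE, and sets
 * R92C_USB_SPECIAL_OPTION_INT_BULK_SEL, which by its name routes
 * interrupt reports over the bulk pipes; the older chips simply unmask
 * everything.
 *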
*/ if (sc->chip & URTWN_CHIP_88E) { usb_err = urtwn_write_4(sc, R88E_HISR, 0xffffffff); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_4(sc, R88E_HIMR, R88E_HIMR_CPWM | R88E_HIMR_CPWM2 | R88E_HIMR_TBDER | R88E_HIMR_PSTIMEOUT); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW | R88E_HIMRE_TXFOVW | R88E_HIMRE_RXERR | R88E_HIMRE_TXERR); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION, urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) | R92C_USB_SPECIAL_OPTION_INT_BULK_SEL); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; } else { usb_err = urtwn_write_4(sc, R92C_HISR, 0xffffffff); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; usb_err = urtwn_write_4(sc, R92C_HIMR, 0xffffffff); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; } /* Set MAC address. */ IEEE80211_ADDR_COPY(macaddr, vap ? vap->iv_myaddr : ic->ic_macaddr); usb_err = urtwn_write_region_1(sc, R92C_MACID, macaddr, IEEE80211_ADDR_LEN); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; /* Set initial network type. */ urtwn_set_mode(sc, R92C_MSR_INFRA); /* Initialize Rx filter. */ urtwn_rxfilter_init(sc); /* Set response rate. */ reg = urtwn_read_4(sc, R92C_RRSR); reg = RW(reg, R92C_RRSR_RATE_BITMAP, R92C_RRSR_RATE_CCK_ONLY_1M); urtwn_write_4(sc, R92C_RRSR, reg); /* Set short/long retry limits. */ urtwn_write_2(sc, R92C_RL, SM(R92C_RL_SRL, 0x30) | SM(R92C_RL_LRL, 0x30)); /* Initialize EDCA parameters. */ urtwn_edca_init(sc); /* Setup rate fallback. */ if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_4(sc, R92C_DARFRC + 0, 0x00000000); urtwn_write_4(sc, R92C_DARFRC + 4, 0x10080404); urtwn_write_4(sc, R92C_RARFRC + 0, 0x04030201); urtwn_write_4(sc, R92C_RARFRC + 4, 0x08070605); } urtwn_write_1(sc, R92C_FWHW_TXQ_CTRL, urtwn_read_1(sc, R92C_FWHW_TXQ_CTRL) | R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW); /* Set ACK timeout. */ urtwn_write_1(sc, R92C_ACKTO, 0x40); /* Setup USB aggregation. */ reg = urtwn_read_4(sc, R92C_TDECTRL); reg = RW(reg, R92C_TDECTRL_BLK_DESC_NUM, 6); urtwn_write_4(sc, R92C_TDECTRL, reg); urtwn_write_1(sc, R92C_TRXDMA_CTRL, urtwn_read_1(sc, R92C_TRXDMA_CTRL) | R92C_TRXDMA_CTRL_RXDMA_AGG_EN); urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH, 48); if (sc->chip & URTWN_CHIP_88E) urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH + 1, 4); else { urtwn_write_1(sc, R92C_USB_DMA_AGG_TO, 4); urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION, urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) | R92C_USB_SPECIAL_OPTION_AGG_EN); urtwn_write_1(sc, R92C_USB_AGG_TH, 8); urtwn_write_1(sc, R92C_USB_AGG_TO, 6); } /* Initialize beacon parameters. */ urtwn_write_2(sc, R92C_BCN_CTRL, 0x1010); urtwn_write_2(sc, R92C_TBTT_PROHIBIT, 0x6404); urtwn_write_1(sc, R92C_DRVERLYINT, 0x05); urtwn_write_1(sc, R92C_BCNDMATIM, 0x02); urtwn_write_2(sc, R92C_BCNTCFG, 0x660f); if (!(sc->chip & URTWN_CHIP_88E)) { /* Setup AMPDU aggregation. */ urtwn_write_4(sc, R92C_AGGLEN_LMT, 0x99997631); /* MCS7~0 */ urtwn_write_1(sc, R92C_AGGR_BREAK_TIME, 0x16); urtwn_write_2(sc, R92C_MAX_AGGR_NUM, 0x0708); urtwn_write_1(sc, R92C_BCN_MAX_ERR, 0xff); } #ifndef URTWN_WITHOUT_UCODE /* Load 8051 microcode. */ error = urtwn_load_firmware(sc); if (error == 0) sc->sc_flags |= URTWN_FW_LOADED; #endif /* Initialize MAC/BB/RF blocks. */ error = urtwn_mac_init(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: error while initializing MAC block\n", __func__); goto fail; } urtwn_bb_init(sc); urtwn_rf_init(sc); /* Reinitialize Rx filter (D3845 is not committed yet). 
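	   [Editor's note, not part of the commit: urtwn_init() funnels USB
	   register-access failures through the local usb_err variable; the
	   success path deliberately falls through into the fail: label,
	   which only maps a status other than USB_ERR_NORMAL_COMPLETION to
	   EIO before unlocking, so a clean run still returns 0.]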
*/ urtwn_rxfilter_init(sc); if (sc->chip & URTWN_CHIP_88E) { urtwn_write_2(sc, R92C_CR, urtwn_read_2(sc, R92C_CR) | R92C_CR_MACTXEN | R92C_CR_MACRXEN); } /* Turn CCK and OFDM blocks on. */ reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD); reg |= R92C_RFMOD_CCK_EN; usb_err = urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD); reg |= R92C_RFMOD_OFDM_EN; usb_err = urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg); if (usb_err != USB_ERR_NORMAL_COMPLETION) goto fail; /* Clear per-station keys table. */ urtwn_cam_init(sc); /* Enable decryption / encryption. */ urtwn_write_2(sc, R92C_SECCFG, R92C_SECCFG_TXUCKEY_DEF | R92C_SECCFG_RXUCKEY_DEF | R92C_SECCFG_TXENC_ENA | R92C_SECCFG_RXDEC_ENA | R92C_SECCFG_TXBCKEY_DEF | R92C_SECCFG_RXBCKEY_DEF); /* * Install static keys (if any). * Must be called after urtwn_cam_init(). */ ieee80211_runtask(ic, &sc->cmdq_task); /* Enable hardware sequence numbering. */ urtwn_write_1(sc, R92C_HWSEQ_CTRL, R92C_TX_QUEUE_ALL); /* Enable per-packet TX report. */ if (sc->chip & URTWN_CHIP_88E) { urtwn_write_1(sc, R88E_TX_RPT_CTRL, urtwn_read_1(sc, R88E_TX_RPT_CTRL) | R88E_TX_RPT1_ENA); } /* Perform LO and IQ calibrations. */ urtwn_iq_calib(sc); /* Perform LC calibration. */ urtwn_lc_calib(sc); /* Fix USB interference issue. */ if (!(sc->chip & URTWN_CHIP_88E)) { urtwn_write_1(sc, 0xfe40, 0xe0); urtwn_write_1(sc, 0xfe41, 0x8d); urtwn_write_1(sc, 0xfe42, 0x80); urtwn_pa_bias_init(sc); } /* Initialize GPIO setting. */ urtwn_write_1(sc, R92C_GPIO_MUXCFG, urtwn_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENBT); /* Fix for lower temperature. */ if (!(sc->chip & URTWN_CHIP_88E)) urtwn_write_1(sc, 0x15, 0xe9); usbd_transfer_start(sc->sc_xfer[URTWN_BULK_RX]); sc->sc_flags |= URTWN_RUNNING; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); fail: if (usb_err != USB_ERR_NORMAL_COMPLETION) error = EIO; URTWN_UNLOCK(sc); return (error); } static void urtwn_stop(struct urtwn_softc *sc) { URTWN_LOCK(sc); if (!(sc->sc_flags & URTWN_RUNNING)) { URTWN_UNLOCK(sc); return; } sc->sc_flags &= ~(URTWN_RUNNING | URTWN_FW_LOADED | URTWN_TEMP_MEASURED); sc->thcal_lctemp = 0; callout_stop(&sc->sc_watchdog_ch); urtwn_abort_xfers(sc); urtwn_drain_mbufq(sc); urtwn_power_off(sc); URTWN_UNLOCK(sc); } static void urtwn_abort_xfers(struct urtwn_softc *sc) { int i; URTWN_ASSERT_LOCKED(sc); /* abort any pending transfers */ for (i = 0; i < URTWN_N_TRANSFER; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static int urtwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct urtwn_softc *sc = ic->ic_softc; struct urtwn_data *bf; int error; URTWN_DPRINTF(sc, URTWN_DEBUG_XMIT, "%s: called; m=%p\n", __func__, m); /* prevent management frames from being sent if we're not ready */ URTWN_LOCK(sc); if (!(sc->sc_flags & URTWN_RUNNING)) { error = ENETDOWN; goto end; } bf = urtwn_getbuf(sc); if (bf == NULL) { error = ENOBUFS; goto end; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = urtwn_tx_data(sc, ni, m, bf); } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
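	 * [Editor's note, not part of the commit: whichever path is taken,
	 * a failure returns the buffer to sc_tx_inactive and the mbuf is
	 * freed at the common end: exit before the error is returned, so
	 * the caller must not free m again; on success the five second Tx
	 * watchdog is re-armed.]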
*/ error = urtwn_tx_raw(sc, ni, m, bf, params); } if (error != 0) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next); goto end; } sc->sc_txtimer = 5; callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc); end: if (error != 0) m_freem(m); URTWN_UNLOCK(sc); return (error); } static void urtwn_ms_delay(struct urtwn_softc *sc) { usb_pause_mtx(&sc->sc_mtx, hz / 1000); } static device_method_t urtwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, urtwn_match), DEVMETHOD(device_attach, urtwn_attach), DEVMETHOD(device_detach, urtwn_detach), DEVMETHOD_END }; static driver_t urtwn_driver = { "urtwn", urtwn_methods, sizeof(struct urtwn_softc) }; static devclass_t urtwn_devclass; DRIVER_MODULE(urtwn, uhub, urtwn_driver, urtwn_devclass, NULL, NULL); MODULE_DEPEND(urtwn, usb, 1, 1, 1); MODULE_DEPEND(urtwn, wlan, 1, 1, 1); #ifndef URTWN_WITHOUT_UCODE MODULE_DEPEND(urtwn, firmware, 1, 1, 1); #endif MODULE_VERSION(urtwn, 1); USB_PNP_HOST_INFO(urtwn_devs); Index: projects/release-pkg/sys/dev/urtwn/if_urtwnvar.h =================================================================== --- projects/release-pkg/sys/dev/urtwn/if_urtwnvar.h (revision 297926) +++ projects/release-pkg/sys/dev/urtwn/if_urtwnvar.h (revision 297927) @@ -1,239 +1,242 @@ /*- * Copyright (c) 2010 Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * $OpenBSD: if_urtwnreg.h,v 1.3 2010/11/16 18:02:59 damien Exp $ * $FreeBSD$ */ #define URTWN_RX_LIST_COUNT 64 #define URTWN_TX_LIST_COUNT 8 #define URTWN_HOST_CMD_RING_COUNT 32 #define URTWN_RXBUFSZ (8 * 1024) //#define URTWN_TXBUFSZ (sizeof(struct r92c_tx_desc) + IEEE80211_MAX_LEN) /* Leave enough space for an A-MSDU frame */ #define URTWN_TXBUFSZ (16 * 1024) #define URTWN_RX_DESC_SIZE (sizeof(struct r92c_rx_stat)) #define URTWN_TX_DESC_SIZE (sizeof(struct r92c_tx_desc)) #define URTWN_TX_TIMEOUT 5000 /* ms */ #define URTWN_LED_LINK 0 #define URTWN_LED_DATA 1 struct urtwn_rx_radiotap_header { struct ieee80211_radiotap_header wr_ihdr; uint64_t wr_tsft; uint8_t wr_flags; uint8_t wr_rate; uint16_t wr_chan_freq; uint16_t wr_chan_flags; int8_t wr_dbm_antsignal; int8_t wr_dbm_antnoise; } __packed __aligned(8); #define URTWN_RX_RADIOTAP_PRESENT \ (1 << IEEE80211_RADIOTAP_TSFT | \ 1 << IEEE80211_RADIOTAP_FLAGS | \ 1 << IEEE80211_RADIOTAP_RATE | \ 1 << IEEE80211_RADIOTAP_CHANNEL | \ 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL | \ 1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) struct urtwn_tx_radiotap_header { struct ieee80211_radiotap_header wt_ihdr; uint8_t wt_flags; uint16_t wt_chan_freq; uint16_t wt_chan_flags; } __packed __aligned(8); #define URTWN_TX_RADIOTAP_PRESENT \ (1 << IEEE80211_RADIOTAP_FLAGS | \ 1 << IEEE80211_RADIOTAP_CHANNEL) struct urtwn_softc; struct urtwn_data { struct urtwn_softc *sc; uint8_t *buf; uint16_t buflen; struct mbuf *m; struct ieee80211_node *ni; STAILQ_ENTRY(urtwn_data) next; }; typedef STAILQ_HEAD(, urtwn_data) urtwn_datahead; union sec_param { struct ieee80211_key key; }; #define CMD_FUNC_PROTO void (*func)(struct urtwn_softc *, \ union sec_param *) struct urtwn_cmdq { union sec_param data; CMD_FUNC_PROTO; }; #define URTWN_CMDQ_SIZE 16 struct urtwn_fw_info { const uint8_t *data; size_t size; }; struct urtwn_node { struct ieee80211_node ni; /* must be the first */ uint8_t id; + int last_rssi; }; #define URTWN_NODE(ni) ((struct urtwn_node *)(ni)) struct urtwn_vap { struct ieee80211vap vap; struct r92c_tx_desc bcn_desc; struct mbuf *bcn_mbuf; struct task tsf_task_adhoc; int (*newstate)(struct ieee80211vap *, enum ieee80211_state, int); void (*recv_mgmt)(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); }; #define URTWN_VAP(vap) ((struct urtwn_vap *)(vap)) struct urtwn_host_cmd { void (*cb)(struct urtwn_softc *, void *); uint8_t data[256]; }; struct urtwn_cmd_newstate { enum ieee80211_state state; int arg; }; struct urtwn_cmd_key { struct ieee80211_key key; uint16_t associd; }; enum { URTWN_BULK_RX, URTWN_BULK_TX_BE, /* = WME_AC_BE */ URTWN_BULK_TX_BK, /* = WME_AC_BK */ URTWN_BULK_TX_VI, /* = WME_AC_VI */ URTWN_BULK_TX_VO, /* = WME_AC_VI */ URTWN_N_TRANSFER = 5, }; #define URTWN_EP_QUEUES URTWN_BULK_RX union urtwn_rom { struct r92c_rom r92c_rom; struct r88e_rom r88e_rom; }; struct urtwn_softc { struct ieee80211com sc_ic; struct mbufq sc_snd; device_t sc_dev; struct usb_device *sc_udev; uint32_t sc_debug; uint8_t sc_iface_index; uint8_t sc_flags; #define URTWN_FLAG_CCK_HIPWR 0x01 #define URTWN_DETACHED 0x02 #define URTWN_RUNNING 0x04 #define URTWN_FW_LOADED 0x08 #define URTWN_TEMP_MEASURED 0x10 u_int chip; #define URTWN_CHIP_92C 0x01 #define URTWN_CHIP_92C_1T2R 0x02 #define URTWN_CHIP_UMC 0x04 #define URTWN_CHIP_UMC_A_CUT 0x08 #define URTWN_CHIP_88E 0x10 #define URTWN_CHIP_HAS_RATECTL(_sc) (!!((_sc)->chip & URTWN_CHIP_88E)) void (*sc_node_free)(struct ieee80211_node *); void (*sc_rf_write)(struct urtwn_softc *, int, uint8_t, uint32_t); int 
(*sc_power_on)(struct urtwn_softc *); void (*sc_power_off)(struct urtwn_softc *); struct ieee80211_node *node_list[R88E_MACID_MAX + 1]; struct mtx nt_mtx; uint8_t board_type; uint8_t regulatory; uint8_t pa_setting; int8_t ofdm_tx_pwr_diff; int8_t bw20_tx_pwr_diff; int avg_pwdb; uint8_t thcal_lctemp; int ntxchains; int nrxchains; int ledlink; int sc_txtimer; + + int last_rssi; int fwcur; struct urtwn_data sc_rx[URTWN_RX_LIST_COUNT]; urtwn_datahead sc_rx_active; urtwn_datahead sc_rx_inactive; struct urtwn_data sc_tx[URTWN_TX_LIST_COUNT]; urtwn_datahead sc_tx_active; int sc_tx_n_active; urtwn_datahead sc_tx_inactive; urtwn_datahead sc_tx_pending; union urtwn_rom rom; uint16_t last_rom_addr; struct callout sc_calib_to; struct callout sc_watchdog_ch; struct mtx sc_mtx; uint32_t keys_bmap; struct urtwn_cmdq cmdq[URTWN_CMDQ_SIZE]; struct mtx cmdq_mtx; struct task cmdq_task; uint8_t cmdq_first; uint8_t cmdq_last; uint32_t rf_chnlbw[R92C_MAX_CHAINS]; struct usb_xfer *sc_xfer[URTWN_N_TRANSFER]; struct urtwn_rx_radiotap_header sc_rxtap; struct urtwn_tx_radiotap_header sc_txtap; }; #define URTWN_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define URTWN_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define URTWN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define URTWN_CMDQ_LOCK_INIT(sc) \ mtx_init(&(sc)->cmdq_mtx, "cmdq lock", NULL, MTX_DEF) #define URTWN_CMDQ_LOCK(sc) mtx_lock(&(sc)->cmdq_mtx) #define URTWN_CMDQ_UNLOCK(sc) mtx_unlock(&(sc)->cmdq_mtx) #define URTWN_CMDQ_LOCK_DESTROY(sc) mtx_destroy(&(sc)->cmdq_mtx) #define URTWN_NT_LOCK_INIT(sc) \ mtx_init(&(sc)->nt_mtx, "node table lock", NULL, MTX_DEF) #define URTWN_NT_LOCK(sc) mtx_lock(&(sc)->nt_mtx) #define URTWN_NT_UNLOCK(sc) mtx_unlock(&(sc)->nt_mtx) #define URTWN_NT_LOCK_DESTROY(sc) mtx_destroy(&(sc)->nt_mtx) Index: projects/release-pkg/sys =================================================================== --- projects/release-pkg/sys (revision 297926) +++ projects/release-pkg/sys (revision 297927) Property changes on: projects/release-pkg/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r297906-297926 Index: projects/release-pkg/usr.sbin/ypldap/ldapclient.c =================================================================== --- projects/release-pkg/usr.sbin/ypldap/ldapclient.c (revision 297926) +++ projects/release-pkg/usr.sbin/ypldap/ldapclient.c (revision 297927) @@ -1,705 +1,694 @@ /* $OpenBSD: ldapclient.c,v 1.31 2014/11/16 23:24:44 tedu Exp $ */ /* $FreeBSD$ */ /* * Copyright (c) 2008 Alexander Schrijver * Copyright (c) 2008 Pierre-Yves Ritschard * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "aldap.h" #include "ypldap.h" void client_sig_handler(int, short, void *); void client_dispatch_dns(int, short, void *); void client_dispatch_parent(int, short, void *); void client_shutdown(void); void client_connect(int, short, void *); void client_configure(struct env *); void client_periodic_update(int, short, void *); int client_build_req(struct idm *, struct idm_req *, struct aldap_message *, int, int); int client_search_idm(struct env *, struct idm *, struct aldap *, char **, char *, int, int, enum imsg_type); int client_try_idm(struct env *, struct idm *); int client_addr_init(struct idm *); int client_addr_free(struct idm *); -struct aldap *client_aldap_open(struct ypldap_addr *); +struct aldap *client_aldap_open(struct ypldap_addr_list *); /* * dummy wrapper to provide aldap_init with its fd's. */ struct aldap * -client_aldap_open(struct ypldap_addr *addr) +client_aldap_open(struct ypldap_addr_list *addr) { int fd = -1; struct ypldap_addr *p; - for (p = addr; p != NULL; p = p->next) { + TAILQ_FOREACH(p, addr, next) { char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV]; struct sockaddr *sa = (struct sockaddr *)&p->ss; if (getnameinfo(sa, sa->sa_len, hbuf, sizeof(hbuf), sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV)) errx(1, "could not get numeric hostname"); if ((fd = socket(sa->sa_family, SOCK_STREAM, 0)) < 0) return NULL; if (connect(fd, sa, sa->sa_len) == 0) break; warn("connect to %s port %s (%s) failed", hbuf, sbuf, "tcp"); close(fd); } if (fd == -1) return NULL; return aldap_init(fd); } int client_addr_init(struct idm *idm) { struct sockaddr_in *sa_in; struct sockaddr_in6 *sa_in6; struct ypldap_addr *h; - for (h = idm->idm_addr; h != NULL; h = h->next) { + TAILQ_FOREACH(h, &idm->idm_addr, next) { switch (h->ss.ss_family) { case AF_INET: sa_in = (struct sockaddr_in *)&h->ss; if (ntohs(sa_in->sin_port) == 0) sa_in->sin_port = htons(LDAP_PORT); idm->idm_state = STATE_DNS_DONE; break; case AF_INET6: sa_in6 = (struct sockaddr_in6 *)&h->ss; if (ntohs(sa_in6->sin6_port) == 0) sa_in6->sin6_port = htons(LDAP_PORT); idm->idm_state = STATE_DNS_DONE; break; default: fatalx("king bula sez: wrong AF in client_addr_init"); /* not reached */ } } return (0); } int client_addr_free(struct idm *idm) { - struct ypldap_addr *h, *p; + struct ypldap_addr *h; - if (idm->idm_addr == NULL) - return (-1); - - for (h = idm->idm_addr; h != NULL; h = p) { - p = h->next; + while (!TAILQ_EMPTY(&idm->idm_addr)) { + h = TAILQ_FIRST(&idm->idm_addr); + TAILQ_REMOVE(&idm->idm_addr, h, next); free(h); } - idm->idm_addr = NULL; - return (0); } void client_sig_handler(int sig, short event, void *p) { switch (sig) { case SIGINT: case SIGTERM: client_shutdown(); break; default: fatalx("unexpected signal"); } } void client_dispatch_dns(int fd, short events, void *p) { struct imsg imsg; u_int16_t dlen; u_char *data; struct ypldap_addr *h; int n, wait_cnt = 0; struct idm *idm; int shut = 0; struct env *env = p; struct imsgev *iev = env->sc_iev_dns; struct imsgbuf *ibuf = &iev->ibuf; if ((events & (EV_READ | EV_WRITE)) == 0) fatalx("unknown event"); if (events & EV_READ) { if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) fatal("imsg_read error"); if (n == 0) shut = 1; } if (events & EV_WRITE) { if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) fatal("msgbuf_write"); if (n == 0) shut = 1; goto done; } for (;;) { if ((n = imsg_get(ibuf, &imsg)) 
== -1) fatal("client_dispatch_dns: imsg_get error"); if (n == 0) break; switch (imsg.hdr.type) { case IMSG_HOST_DNS: TAILQ_FOREACH(idm, &env->sc_idms, idm_entry) if (idm->idm_id == imsg.hdr.peerid) break; if (idm == NULL) { log_warnx("IMSG_HOST_DNS with invalid peerID"); break; } - if (idm->idm_addr != NULL) { - log_warnx("IMSG_HOST_DNS but addr != NULL!"); + if (!TAILQ_EMPTY(&idm->idm_addr)) { + log_warnx("IMSG_HOST_DNS but addrs set!"); break; } dlen = imsg.hdr.len - IMSG_HEADER_SIZE; if (dlen == 0) { /* no data -> temp error */ idm->idm_state = STATE_DNS_TEMPFAIL; break; } data = (u_char *)imsg.data; while (dlen >= sizeof(struct sockaddr_storage)) { - if ((h = calloc(1, sizeof(struct ypldap_addr))) == - NULL) + if ((h = calloc(1, sizeof(*h))) == NULL) fatal(NULL); memcpy(&h->ss, data, sizeof(h->ss)); + TAILQ_INSERT_HEAD(&idm->idm_addr, h, next); - if (idm->idm_addr == NULL) - h->next = NULL; - else - h->next = idm->idm_addr; - - idm->idm_addr = h; - data += sizeof(h->ss); dlen -= sizeof(h->ss); } if (dlen != 0) fatalx("IMSG_HOST_DNS: dlen != 0"); client_addr_init(idm); break; default: break; } imsg_free(&imsg); } TAILQ_FOREACH(idm, &env->sc_idms, idm_entry) { if (client_try_idm(env, idm) == -1) idm->idm_state = STATE_LDAP_FAIL; if (idm->idm_state < STATE_LDAP_DONE) wait_cnt++; } if (wait_cnt == 0) imsg_compose_event(env->sc_iev, IMSG_END_UPDATE, 0, 0, -1, NULL, 0); done: if (!shut) imsg_event_add(iev); else { /* this pipe is dead, so remove the event handler */ event_del(&iev->ev); event_loopexit(NULL); } } void client_dispatch_parent(int fd, short events, void *p) { int n; int shut = 0; struct imsg imsg; struct env *env = p; struct imsgev *iev = env->sc_iev; struct imsgbuf *ibuf = &iev->ibuf; if ((events & (EV_READ | EV_WRITE)) == 0) fatalx("unknown event"); if (events & EV_READ) { if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) fatal("imsg_read error"); if (n == 0) shut = 1; } if (events & EV_WRITE) { if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) fatal("msgbuf_write"); if (n == 0) shut = 1; goto done; } for (;;) { if ((n = imsg_get(ibuf, &imsg)) == -1) fatal("client_dispatch_parent: imsg_get error"); if (n == 0) break; switch (imsg.hdr.type) { case IMSG_CONF_START: { struct env params; if (env->sc_flags & F_CONFIGURING) { log_warnx("configuration already in progress"); break; } memcpy(¶ms, imsg.data, sizeof(params)); log_debug("configuration starting"); env->sc_flags |= F_CONFIGURING; purge_config(env); memcpy(&env->sc_conf_tv, ¶ms.sc_conf_tv, sizeof(env->sc_conf_tv)); env->sc_flags |= params.sc_flags; break; } case IMSG_CONF_IDM: { struct idm *idm; if (!(env->sc_flags & F_CONFIGURING)) break; if ((idm = calloc(1, sizeof(*idm))) == NULL) fatal(NULL); memcpy(idm, imsg.data, sizeof(*idm)); idm->idm_env = env; TAILQ_INSERT_TAIL(&env->sc_idms, idm, idm_entry); break; } case IMSG_CONF_END: env->sc_flags &= ~F_CONFIGURING; log_debug("applying configuration"); client_configure(env); break; default: log_debug("client_dispatch_parent: unexpect imsg %d", imsg.hdr.type); break; } imsg_free(&imsg); } done: if (!shut) imsg_event_add(iev); else { /* this pipe is dead, so remove the event handler */ event_del(&iev->ev); event_loopexit(NULL); } } void client_shutdown(void) { log_info("ldap client exiting"); _exit(0); } pid_t ldapclient(int pipe_main2client[2]) { pid_t pid, dns_pid; int pipe_dns[2]; struct passwd *pw; struct event ev_sigint; struct event ev_sigterm; struct env env; switch (pid = fork()) { case -1: fatal("cannot fork"); break; case 0: break; default: return (pid); } 
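	/*
	 * [Editor's note, not part of the commit: everything from here on
	 * runs in the forked ldap client process.  The ordering of the
	 * privilege-separation steps below matters: the dns engine is
	 * spawned while the process is still privileged, the process then
	 * chroots to YPLDAP_USER's home directory and chdirs to "/", and
	 * only afterwards drops its credentials, setgroups() before
	 * setresgid() before setresuid(), because dropping the uid first
	 * would leave the process unable to change its groups.  DEBUG
	 * builds deliberately skip the chroot and the credential drop.]
	 */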
bzero(&env, sizeof(env)); TAILQ_INIT(&env.sc_idms); if ((pw = getpwnam(YPLDAP_USER)) == NULL) fatal("getpwnam"); if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, pipe_dns) == -1) fatal("socketpair"); dns_pid = ypldap_dns(pipe_dns, pw); close(pipe_dns[1]); #ifndef DEBUG if (chroot(pw->pw_dir) == -1) fatal("chroot"); if (chdir("/") == -1) fatal("chdir"); #else #warning disabling chrooting in DEBUG mode #endif setproctitle("ldap client"); ypldap_process = PROC_CLIENT; #ifndef DEBUG if (setgroups(1, &pw->pw_gid) || setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) fatal("cannot drop privileges"); #else #warning disabling privilege revocation in DEBUG mode #endif event_init(); signal(SIGPIPE, SIG_IGN); signal_set(&ev_sigint, SIGINT, client_sig_handler, NULL); signal_set(&ev_sigterm, SIGTERM, client_sig_handler, NULL); signal_add(&ev_sigint, NULL); signal_add(&ev_sigterm, NULL); close(pipe_main2client[0]); if ((env.sc_iev = calloc(1, sizeof(*env.sc_iev))) == NULL) fatal(NULL); if ((env.sc_iev_dns = calloc(1, sizeof(*env.sc_iev_dns))) == NULL) fatal(NULL); env.sc_iev->events = EV_READ; env.sc_iev->data = &env; imsg_init(&env.sc_iev->ibuf, pipe_main2client[1]); env.sc_iev->handler = client_dispatch_parent; event_set(&env.sc_iev->ev, env.sc_iev->ibuf.fd, env.sc_iev->events, env.sc_iev->handler, &env); event_add(&env.sc_iev->ev, NULL); env.sc_iev_dns->events = EV_READ; env.sc_iev_dns->data = &env; imsg_init(&env.sc_iev_dns->ibuf, pipe_dns[0]); env.sc_iev_dns->handler = client_dispatch_dns; event_set(&env.sc_iev_dns->ev, env.sc_iev_dns->ibuf.fd, env.sc_iev_dns->events, env.sc_iev_dns->handler, &env); event_add(&env.sc_iev_dns->ev, NULL); event_dispatch(); client_shutdown(); return (0); } int client_build_req(struct idm *idm, struct idm_req *ir, struct aldap_message *m, int min_attr, int max_attr) { char **ldap_attrs; int i, k; bzero(ir, sizeof(*ir)); for (i = min_attr; i < max_attr; i++) { if (idm->idm_flags & F_FIXED_ATTR(i)) { if (strlcat(ir->ir_line, idm->idm_attrs[i], sizeof(ir->ir_line)) >= sizeof(ir->ir_line)) /* * entry yields a line > 1024, trash it. */ return (-1); if (i == ATTR_UID) { ir->ir_key.ik_uid = strtonum( idm->idm_attrs[i], 0, UID_MAX, NULL); } else if (i == ATTR_GR_GID) { ir->ir_key.ik_gid = strtonum( idm->idm_attrs[i], 0, GID_MAX, NULL); } } else if (idm->idm_list & F_LIST(i)) { aldap_match_attr(m, idm->idm_attrs[i], &ldap_attrs); for (k = 0; k >= 0 && ldap_attrs && ldap_attrs[k] != NULL; k++) { /* XXX: Fail when attributes have illegal characters e.g. 
',' */ if (strlcat(ir->ir_line, ldap_attrs[k], sizeof(ir->ir_line)) >= sizeof(ir->ir_line)) continue; if (ldap_attrs[k+1] != NULL) if (strlcat(ir->ir_line, ",", sizeof(ir->ir_line)) >= sizeof(ir->ir_line)) { aldap_free_attr(ldap_attrs); return (-1); } } aldap_free_attr(ldap_attrs); } else { if (aldap_match_attr(m, idm->idm_attrs[i], &ldap_attrs) == -1) return (-1); if (ldap_attrs[0] == NULL) return (-1); if (strlcat(ir->ir_line, ldap_attrs[0], sizeof(ir->ir_line)) >= sizeof(ir->ir_line)) { aldap_free_attr(ldap_attrs); return (-1); } if (i == ATTR_UID) { ir->ir_key.ik_uid = strtonum( ldap_attrs[0], 0, UID_MAX, NULL); } else if (i == ATTR_GR_GID) { ir->ir_key.ik_uid = strtonum( ldap_attrs[0], 0, GID_MAX, NULL); } aldap_free_attr(ldap_attrs); } if (i + 1 != max_attr) if (strlcat(ir->ir_line, ":", sizeof(ir->ir_line)) >= sizeof(ir->ir_line)) return (-1); } return (0); } int client_search_idm(struct env *env, struct idm *idm, struct aldap *al, char **attrs, char *filter, int min_attr, int max_attr, enum imsg_type type) { struct idm_req ir; struct aldap_message *m; struct aldap_page_control *pg = NULL; const char *errstr; char *dn; dn = idm->idm_basedn; if (type == IMSG_GRP_ENTRY && idm->idm_groupdn[0] != '\0') dn = idm->idm_groupdn; do { if (aldap_search(al, dn, LDAP_SCOPE_SUBTREE, filter, attrs, 0, 0, 0, pg) == -1) { aldap_get_errno(al, &errstr); log_debug("%s", errstr); return (-1); } if (pg != NULL) { aldap_freepage(pg); pg = NULL; } while ((m = aldap_parse(al)) != NULL) { if (al->msgid != m->msgid) { goto fail; } if (m->message_type == LDAP_RES_SEARCH_RESULT) { if (m->page != NULL && m->page->cookie_len != 0) pg = m->page; else pg = NULL; aldap_freemsg(m); break; } if (m->message_type != LDAP_RES_SEARCH_ENTRY) { goto fail; } if (client_build_req(idm, &ir, m, min_attr, max_attr) == 0) imsg_compose_event(env->sc_iev, type, 0, 0, -1, &ir, sizeof(ir)); aldap_freemsg(m); } } while (pg != NULL); return (0); fail: aldap_freemsg(m); if (pg != NULL) { aldap_freepage(pg); } return (-1); } int client_try_idm(struct env *env, struct idm *idm) { const char *where; char *attrs[ATTR_MAX+1]; int i, j; struct aldap_message *m; struct aldap *al; where = "connect"; - if ((al = client_aldap_open(idm->idm_addr)) == NULL) + if ((al = client_aldap_open(&idm->idm_addr)) == NULL) return (-1); if (idm->idm_flags & F_NEEDAUTH) { where = "binding"; if (aldap_bind(al, idm->idm_binddn, idm->idm_bindcred) == -1) goto bad; where = "parsing"; if ((m = aldap_parse(al)) == NULL) goto bad; where = "verifying msgid"; if (al->msgid != m->msgid) { aldap_freemsg(m); goto bad; } aldap_freemsg(m); } bzero(attrs, sizeof(attrs)); for (i = 0, j = 0; i < ATTR_MAX; i++) { if (idm->idm_flags & F_FIXED_ATTR(i)) continue; attrs[j++] = idm->idm_attrs[i]; } attrs[j] = NULL; /* * build password line. */ where = "search"; log_debug("searching password entries"); if (client_search_idm(env, idm, al, attrs, idm->idm_filters[FILTER_USER], 0, ATTR_MAX, IMSG_PW_ENTRY) == -1) goto bad; bzero(attrs, sizeof(attrs)); for (i = ATTR_GR_MIN, j = 0; i < ATTR_GR_MAX; i++) { if (idm->idm_flags & F_FIXED_ATTR(i)) continue; attrs[j++] = idm->idm_attrs[i]; } attrs[j] = NULL; /* * build group line. 
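	 * [Editor's note, not part of the commit: this mirrors the password
	 * pass above; the attrs array has just been rebuilt from the
	 * ATTR_GR_MIN..ATTR_GR_MAX range, again skipping fixed attributes,
	 * and the search below uses FILTER_GROUP and emits IMSG_GRP_ENTRY
	 * records instead of IMSG_PW_ENTRY.]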
*/ where = "search"; log_debug("searching group entries"); if (client_search_idm(env, idm, al, attrs, idm->idm_filters[FILTER_GROUP], ATTR_GR_MIN, ATTR_GR_MAX, IMSG_GRP_ENTRY) == -1) goto bad; aldap_close(al); idm->idm_state = STATE_LDAP_DONE; return (0); bad: aldap_close(al); log_debug("directory %s errored out in %s", idm->idm_name, where); return (-1); } void client_periodic_update(int fd, short event, void *p) { struct env *env = p; struct idm *idm; int fail_cnt = 0; /* If LDAP isn't finished, notify the master process to trash the * update. */ TAILQ_FOREACH(idm, &env->sc_idms, idm_entry) { if (idm->idm_state < STATE_LDAP_DONE) fail_cnt++; idm->idm_state = STATE_NONE; client_addr_free(idm); } if (fail_cnt > 0) { log_debug("trash the update"); imsg_compose_event(env->sc_iev, IMSG_TRASH_UPDATE, 0, 0, -1, NULL, 0); } client_configure(env); } void client_configure(struct env *env) { struct timeval tv; struct idm *idm; u_int16_t dlen; log_debug("connecting to directories"); imsg_compose_event(env->sc_iev, IMSG_START_UPDATE, 0, 0, -1, NULL, 0); /* Start the DNS lookups */ TAILQ_FOREACH(idm, &env->sc_idms, idm_entry) { dlen = strlen(idm->idm_name) + 1; imsg_compose_event(env->sc_iev_dns, IMSG_HOST_DNS, idm->idm_id, 0, -1, idm->idm_name, dlen); } tv.tv_sec = env->sc_conf_tv.tv_sec; tv.tv_usec = env->sc_conf_tv.tv_usec; evtimer_set(&env->sc_conf_ev, client_periodic_update, env); evtimer_add(&env->sc_conf_ev, &tv); } Index: projects/release-pkg/usr.sbin/ypldap/ypldap.h =================================================================== --- projects/release-pkg/usr.sbin/ypldap/ypldap.h (revision 297926) +++ projects/release-pkg/usr.sbin/ypldap/ypldap.h (revision 297927) @@ -1,222 +1,223 @@ /* $OpenBSD: ypldap.h,v 1.16 2015/01/16 06:40:22 deraadt Exp $ */ /* $FreeBSD$ */ /* * Copyright (c) 2008 Pierre-Yves Ritschard * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #define YPLDAP_USER "_ypldap" #define YPLDAP_CONF_FILE "/etc/ypldap.conf" #define DEFAULT_INTERVAL 600 #define LINE_WIDTH 1024 #define FILTER_WIDTH 128 #define ATTR_WIDTH 32 #define MAX_SERVERS_DNS 8 enum imsg_type { IMSG_NONE, IMSG_CONF_START, IMSG_CONF_IDM, IMSG_CONF_END, IMSG_START_UPDATE, IMSG_END_UPDATE, IMSG_TRASH_UPDATE, IMSG_PW_ENTRY, IMSG_GRP_ENTRY, IMSG_HOST_DNS }; struct ypldap_addr { - struct ypldap_addr *next; - struct sockaddr_storage ss; + TAILQ_ENTRY(ypldap_addr) next; + struct sockaddr_storage ss; }; +TAILQ_HEAD(ypldap_addr_list, ypldap_addr); enum { PROC_MAIN, PROC_CLIENT } ypldap_process; struct userent { RB_ENTRY(userent) ue_name_node; RB_ENTRY(userent) ue_uid_node; uid_t ue_uid; char *ue_line; char *ue_netid_line; gid_t ue_gid; }; struct groupent { RB_ENTRY(groupent) ge_name_node; RB_ENTRY(groupent) ge_gid_node; gid_t ge_gid; char *ge_line; }; enum client_state { STATE_NONE, STATE_DNS_INPROGRESS, STATE_DNS_TEMPFAIL, STATE_DNS_DONE, STATE_LDAP_FAIL, STATE_LDAP_DONE }; /* * beck, djm, dlg: pay attention to the struct name */ struct idm { TAILQ_ENTRY(idm) idm_entry; u_int32_t idm_id; char idm_name[MAXHOSTNAMELEN]; #define F_SSL 0x00100000 #define F_CONFIGURING 0x00200000 #define F_NEEDAUTH 0x00400000 #define F_FIXED_ATTR(n) (1< * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ypldap.h" volatile sig_atomic_t quit_dns = 0; struct imsgev *iev_dns; void dns_dispatch_imsg(int, short, void *); void dns_sig_handler(int, short, void *); void dns_shutdown(void); -int host_dns(const char *s, struct ypldap_addr **hn); +int host_dns(const char *, struct ypldap_addr_list *); void dns_sig_handler(int sig, short event, void *p) { switch (sig) { case SIGINT: case SIGTERM: dns_shutdown(); break; default: fatalx("unexpected signal"); } } void dns_shutdown(void) { log_info("dns engine exiting"); _exit(0); } pid_t ypldap_dns(int pipe_ntp[2], struct passwd *pw) { pid_t pid; struct event ev_sigint; struct event ev_sigterm; struct event ev_sighup; struct env env; switch (pid = fork()) { case -1: fatal("cannot fork"); break; case 0: break; default: return (pid); } setproctitle("dns engine"); close(pipe_ntp[0]); if (setgroups(1, &pw->pw_gid) || setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) || setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid)) fatal("can't drop privileges"); endservent(); event_init(); signal_set(&ev_sigint, SIGINT, dns_sig_handler, NULL); signal_set(&ev_sigterm, SIGTERM, dns_sig_handler, NULL); signal_set(&ev_sighup, SIGHUP, dns_sig_handler, NULL); signal_add(&ev_sigint, NULL); signal_add(&ev_sigterm, NULL); signal_add(&ev_sighup, NULL); if ((env.sc_iev = calloc(1, sizeof(*env.sc_iev))) == NULL) fatal(NULL); env.sc_iev->events = EV_READ; env.sc_iev->data = &env; imsg_init(&env.sc_iev->ibuf, pipe_ntp[1]); env.sc_iev->handler = dns_dispatch_imsg; event_set(&env.sc_iev->ev, env.sc_iev->ibuf.fd, env.sc_iev->events, env.sc_iev->handler, &env); event_add(&env.sc_iev->ev, NULL); event_dispatch(); dns_shutdown(); return (0); } void dns_dispatch_imsg(int fd, short events, void *p) { struct imsg imsg; int n, cnt; char *name; - struct ypldap_addr *h, *hn; + struct ypldap_addr_list hn = TAILQ_HEAD_INITIALIZER(hn); + struct ypldap_addr *h; struct ibuf *buf; struct env *env = p; struct imsgev *iev = env->sc_iev; struct imsgbuf *ibuf = &iev->ibuf; int shut = 0; if ((events & (EV_READ | EV_WRITE)) == 0) fatalx("unknown event"); if (events & EV_READ) { if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) fatal("imsg_read error"); if (n == 0) shut = 1; } if (events & EV_WRITE) { if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN) fatal("msgbuf_write"); if (n == 0) shut = 1; goto done; } for (;;) { if ((n = imsg_get(ibuf, &imsg)) == -1) fatal("client_dispatch_imsg: imsg_get error"); if (n == 0) break; switch (imsg.hdr.type) { case IMSG_HOST_DNS: name = imsg.data; if (imsg.hdr.len < 1 + IMSG_HEADER_SIZE) fatalx("invalid IMSG_HOST_DNS received"); imsg.hdr.len -= 1 + IMSG_HEADER_SIZE; if (name[imsg.hdr.len] != '\0' || strlen(name) != imsg.hdr.len) fatalx("invalid IMSG_HOST_DNS received"); if ((cnt = host_dns(name, &hn)) == -1) break; buf = imsg_create(ibuf, IMSG_HOST_DNS, imsg.hdr.peerid, 0, cnt * sizeof(struct sockaddr_storage)); if (buf == NULL) break; if (cnt > 0) { - h = hn; - while (h != NULL) { + while(!TAILQ_EMPTY(&hn)) { + h = TAILQ_FIRST(&hn); + TAILQ_REMOVE(&hn, h, next); imsg_add(buf, &h->ss, sizeof(h->ss)); - hn = h->next; free(h); - h = hn; } } imsg_close(ibuf, buf); break; default: break; } imsg_free(&imsg); } done: if (!shut) imsg_event_add(iev); else { /* this pipe is dead, so remove the event handler */ event_del(&iev->ev); event_loopexit(NULL); } } int -host_dns(const char *s, 
struct ypldap_addr **hn)
+host_dns(const char *s, struct ypldap_addr_list *hn)
 {
	struct addrinfo		 hints, *res0, *res;
	int			 error, cnt = 0;
	struct sockaddr_in	*sa_in;
	struct sockaddr_in6	*sa_in6;
-	struct ypldap_addr	*h, *hh = NULL;
+	struct ypldap_addr	*h;

	bzero(&hints, sizeof(hints));
	hints.ai_family = PF_UNSPEC;
	hints.ai_socktype = SOCK_DGRAM; /* DUMMY */
	error = getaddrinfo(s, NULL, &hints, &res0);
	if (error == EAI_AGAIN || error == EAI_NONAME)
		return (0);
	if (error) {
		log_warnx("could not parse \"%s\": %s", s,
		    gai_strerror(error));
		return (-1);
	}

	for (res = res0; res && cnt < MAX_SERVERS_DNS; res = res->ai_next) {
		if (res->ai_family != AF_INET &&
		    res->ai_family != AF_INET6)
			continue;
		if ((h = calloc(1, sizeof(struct ypldap_addr))) == NULL)
			fatal(NULL);
		h->ss.ss_family = res->ai_family;
		if (res->ai_family == AF_INET) {
			sa_in = (struct sockaddr_in *)&h->ss;
			sa_in->sin_len = sizeof(struct sockaddr_in);
			sa_in->sin_addr.s_addr = ((struct sockaddr_in *)
			    res->ai_addr)->sin_addr.s_addr;
		} else {
			sa_in6 = (struct sockaddr_in6 *)&h->ss;
			sa_in6->sin6_len = sizeof(struct sockaddr_in6);
			memcpy(&sa_in6->sin6_addr, &((struct sockaddr_in6 *)
			    res->ai_addr)->sin6_addr, sizeof(struct in6_addr));
		}
-		h->next = hh;
-		hh = h;
+		TAILQ_INSERT_HEAD(hn, h, next);
		cnt++;
	}
	freeaddrinfo(res0);
-
-	*hn = hh;
	return (cnt);
 }
Index: projects/release-pkg
===================================================================
--- projects/release-pkg	(revision 297926)
+++ projects/release-pkg	(revision 297927)

Property changes on: projects/release-pkg
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r297906-297926
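[Editor's note, appended, not part of the commit: the ypldap side of this
merge replaces the hand-rolled singly linked ypldap_addr chain (the
h->next juggling formerly in host_dns(), client_addr_free() and
client_dispatch_dns()) with the <sys/queue.h> tail-queue macros.  The
following is a minimal standalone sketch of that pattern; the struct,
field and variable names (addr, addr_list, value) are illustrative only,
not taken from the tree:

	#include <sys/queue.h>

	#include <stdio.h>
	#include <stdlib.h>

	struct addr {
		TAILQ_ENTRY(addr)	next;	/* linkage, like ypldap_addr */
		int			value;
	};
	TAILQ_HEAD(addr_list, addr);

	int
	main(void)
	{
		struct addr_list list = TAILQ_HEAD_INITIALIZER(list);
		struct addr *a;
		int i;

		/* Build the list, inserting at the head as host_dns() does. */
		for (i = 0; i < 3; i++) {
			if ((a = calloc(1, sizeof(*a))) == NULL)
				return (1);
			a->value = i;
			TAILQ_INSERT_HEAD(&list, a, next);
		}

		/* Walk it, as client_aldap_open()/client_addr_init() do. */
		TAILQ_FOREACH(a, &list, next)
			printf("%d\n", a->value);

		/* Drain it, the idiom the merge adds to client_addr_free(). */
		while (!TAILQ_EMPTY(&list)) {
			a = TAILQ_FIRST(&list);
			TAILQ_REMOVE(&list, a, next);
			free(a);
		}
		return (0);
	}

The drain loop is the same TAILQ_EMPTY/TAILQ_FIRST/TAILQ_REMOVE idiom the
merge introduces in client_addr_free() and dns_dispatch_imsg(); removing
each element before freeing it avoids the save-the-next-pointer dance the
old open-coded list needed.]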